repo_name | path | copies | size | content | license
---|---|---|---|---|---|
nikitasingh981/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12291 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
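# The expression above uses the Bernoulli-RBM closed form
# F(v) = -v . intercept_visible_ - sum_j log(1 + exp(intercept_hidden_[j] + (v W^T)_j)),
# with np.logaddexp(0, x) providing a numerically stable log(1 + exp(x)).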
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
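The positive phase is computed from the minibatch itself, while the
negative phase samples visible units from the persistent fantasy
particles stored in ``h_samples_`` (this is what makes the method PCD
rather than plain contrastive divergence).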
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
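The result is scaled by n_features, giving a stochastic estimate of the
full pseudo-log-likelihood summed over features.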
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
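# Minimal usage sketch (illustrative values; mirrors the class docstring example):
#
# rbm = BernoulliRBM(n_components=64, learning_rate=0.05, n_iter=20, random_state=0)
# H = rbm.fit_transform(X) # X is expected to contain values in [0, 1]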
| bsd-3-clause |
dpinney/omf | omf/scratch/Neural_Net_Experimentation/peak forecast model/peakForecast_m.py | 1 | 3921 | ''' Calculate the costs and benefits of energy storage from a distribution utility perspective. '''
import os, sys, shutil, csv
from datetime import datetime as dt, timedelta
from os.path import isdir, join as pJoin
import pandas as pd
from omf.models import __neoMetaModel__
from __neoMetaModel__ import *
from omf import forecast as lf
from omf import peakForecast as pf
# Model metadata:
modelName, template = metadata(__file__)
tooltip = "This model predicts whether the following day will be a monthly peak."
hidden = True
def work(modelDir, ind):
''' Model processing done here. '''
epochs = int(ind['epochs'])
o = {} # See bottom of file for out's structure
o['max_c'] = float(ind['max_c'])
try:
with open(pJoin(modelDir, 'hist.csv'), 'w') as f:
f.write(ind['histCurve'].replace('\r', ''))
df = pd.read_csv(pJoin(modelDir, 'hist.csv'))
assert df.shape[0] >= 26280 # must cover at least 3 years of hourly data
df['dates'] = df.apply(
lambda x: dt(
int(x['year']),
int(x['month']),
int(x['day']),
int(x['hour'])),
axis=1
)
df['dayOfYear'] = df['dates'].dt.dayofyear
except:
raise Exception("CSV file is incorrect format.")
this_year = max(list(df.year.unique()))
o['startDate'] = "{}-01-01".format(this_year)
# ---------------------- MAKE PREDICTIONS ------------------------------- #
d_dict = pf.dispatch_strategy(df, EPOCHS=epochs)
df_dispatch = d_dict['df_dispatch']
# -------------------- MODEL ACCURACY ANALYSIS -------------------------- #
# load forecasting accuracy
demand = df_dispatch['load']
o['demand'] = list(demand)
o['one_day'] = list(df_dispatch['1-day'])
o['two_day'] = list(df_dispatch['2-day'])
o['one_day_train'] = 100 - round(d_dict['1-day_accuracy']['train'], 2)
o['one_day_test'] = 100 - round(d_dict['1-day_accuracy']['test'], 2)
o['two_day_train'] = 100 - round(d_dict['2-day_accuracy']['train'], 2)
o['two_day_test'] = 100 - round(d_dict['2-day_accuracy']['test'], 2)
# peak forecasting accuracy
df_conf = pf.confidence_dispatch(df_dispatch, max_c=o['max_c'])
# o['precision_g'] = list(df_conf['precision'])
# o['recall_g'] = list(df_conf['recall'])
# o['peaks_missed_g'] = list(df_conf['peaks_missed'])
# o['unnecessary_dispatches_g'] = list(df_conf['unnecessary_dispatches'])
ans = df_dispatch['should_dispatch']
pre = df_dispatch['dispatch']
days = [(dt(this_year, 1, 1)+timedelta(days=1)*i) for i, _ in enumerate(pre)]
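# Demand-weighted outcome series: true positives (dispatched on an actual peak day),
# false positives (unnecessary dispatches) and false negatives (missed peaks).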
tp = list((ans & pre)*demand)
fp = list(((~ans) & pre)*demand)
fn = list((ans & (~pre))*demand)
o['true_positive'] = [[str(i)[:-9] + "T20:15:00.441844", j] for i, j in zip(days, tp)]
o['false_positive'] = [[str(i)[:-9] + "T20:15:00.441844", j] for i, j in zip(days,fp)]
o['false_negative'] = [[str(i)[:-9] + "T20:15:00.441844", j] for i, j in zip(days, fn)]
# recommended confidence
d = pf.find_lowest_confidence(df_conf)
o['lowest_confidence'] = d['confidence']
o['lowest_dispatch'] = d['unnecessary_dispatches']
print(len(o['true_positive']), o['true_positive'])
o['stderr'] = ''
return o
def new(modelDir):
''' Create a new instance of this model. Returns true on success, false on failure. '''
defaultInputs = {
'created': '2015-06-12 17:20:39.308239',
'modelType': modelName,
'runTime': '0:01:03',
'epochs': '1',
'max_c': '0.1',
'histFileName': 'd_Texas_17yr_TempAndLoad.csv',
"histCurve": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","d_Texas_17yr_TempAndLoad.csv"), 'rU').read(),
}
return __neoMetaModel__.new(modelDir, defaultInputs)
def _tests():
modelLoc = pJoin(__neoMetaModel__._omfDir,'data','Model','admin','Automated Testing of ' + modelName)
# Blow away old test results if necessary.
if isdir(modelLoc):
shutil.rmtree(modelLoc)
new(modelLoc) # Create New.
renderAndShow(modelLoc) # Pre-run.
runForeground(modelLoc) # Run the model.
renderAndShow(modelLoc) # Show the output.
if __name__ == '__main__':
_tests()
| gpl-2.0 |
pkruskal/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
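# A callable fun is expected to behave like the built-in nonlinearities:
# return the elementwise function value and the mean of its derivative over
# the last axis, as g_test does here (x**3 and mean(3 * x**2)).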
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
mbkumar/pymatgen | pymatgen/phonon/plotter.py | 1 | 23979 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements plotter for DOS and band structure.
"""
import logging
from collections import OrderedDict, namedtuple
import numpy as np
import scipy.constants as const
from monty.json import jsanitize
from pymatgen.electronic_structure.plotter import plot_brillouin_zone
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.util.plotting import pretty_plot, add_fig_kwargs, get_ax_fig_plt
logger = logging.getLogger(__name__)
FreqUnits = namedtuple("FreqUnits", ["factor", "label"])
def freq_units(units):
"""
Args:
units: str, accepted values: thz, ev, mev, ha, cm-1, cm^-1
Returns:
Returns conversion factor from THz to the required units and the label in the form of a namedtuple
"""
d = {"thz": FreqUnits(1, "THz"),
"ev": FreqUnits(const.value("hertz-electron volt relationship") * const.tera, "eV"),
"mev": FreqUnits(const.value("hertz-electron volt relationship") * const.tera / const.milli, "meV"),
"ha": FreqUnits(const.value("hertz-hartree relationship") * const.tera, "Ha"),
"cm-1": FreqUnits(const.value("hertz-inverse meter relationship") * const.tera * const.centi, "cm^{-1}"),
'cm^-1': FreqUnits(const.value("hertz-inverse meter relationship") * const.tera * const.centi, "cm^{-1}")
}
try:
return d[units.lower().strip()]
except KeyError:
raise KeyError('Value for units `{}` unknown\nPossible values are:\n {}'.format(units, list(d.keys())))
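# For example, freq_units("mev").factor is ~4.136: 1 THz corresponds to about 4.136 meV.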
class PhononDosPlotter:
"""
Class for plotting phonon DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = PhononDosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompletePhononDos.get_element_dos().
"""
def __init__(self, stack=False, sigma=None):
"""
Args:
stack: Whether to plot the DOS as a stacked area graph
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
PhononDos object
"""
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'frequencies':..,
'densities': ...}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None, units="thz"):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
u = freq_units(units)
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
y = None
alldensities = []
allfrequencies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of frequencies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
frequencies = dos['frequencies'] * u.factor
densities = dos['densities']
if y is None:
y = np.zeros(frequencies.shape)
if self.stack:
y += densities
newdens = y.copy()
else:
newdens = densities
allfrequencies.append(frequencies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allfrequencies.reverse()
allpts = []
for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
allpts.extend(list(zip(frequencies, densities)))
if self.stack:
plt.fill(frequencies, densities, color=colors[i % ncolors],
label=str(key))
else:
plt.plot(frequencies, densities, color=colors[i % ncolors],
label=str(key), linewidth=3)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel(r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label))
plt.ylabel(r'$\mathrm{Density\ of\ states}$')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None, units="thz"):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1
"""
plt = self.get_plot(xlim, ylim, units=units)
plt.savefig(filename, format=img_format)
plt.close()
def show(self, xlim=None, ylim=None, units="thz"):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(xlim, ylim, units=units)
plt.show()
class PhononBSPlotter:
"""
Class to plot or get data to facilitate the plot of band structure objects.
"""
def __init__(self, bs):
"""
Args:
bs: A PhononBandStructureSymmLine object.
"""
if not isinstance(bs, PhononBandStructureSymmLine):
raise ValueError(
"PhononBSPlotter only works with PhononBandStructureSymmLine objects. "
"A PhononBandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize: only plot the unique values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][1], d=temp_ticks[i][0]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][1], d=temp_ticks[i][0]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("label already printed... "
"skipping label {i}".format(i=ticks['label'][i]))
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
return plt
def bs_plot_data(self):
"""
Get the data nicely formatted for a plot
Returns:
A dict of the following format:
ticks: A dict with the 'distances' at which there is a qpoint (the
x axis) and the labels (None if no label)
frequencies: A list (one element for each branch) of frequencies for
each qpoint: [branch][qpoint][mode]. The data is
stored by branch to facilitate the plotting
lattice: The reciprocal lattice.
"""
distance = []
frequency = []
ticks = self.get_ticks()
for b in self._bs.branches:
frequency.append([])
distance.append([self._bs.distance[j]
for j in range(b['start_index'],
b['end_index'] + 1)])
for i in range(self._nb_bands):
frequency[-1].append(
[self._bs.bands[i][j]
for j in range(b['start_index'], b['end_index'] + 1)])
return {'ticks': ticks, 'distances': distance, 'frequency': frequency,
'lattice': self._bs.lattice_rec.as_dict()}
def get_plot(self, ylim=None, units="thz"):
"""
Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
u = freq_units(units)
plt = pretty_plot(12, 8)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['frequency'][d][i][j] * u.factor
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
self._maketicks(plt)
# plot y=0 line
plt.axhline(0, linewidth=1, color='k')
# Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label)
plt.ylabel(ylabel, fontsize=30)
# X range (K)
# last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is not None:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show(self, ylim=None, units="thz"):
"""
Show the plot using matplotlib.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(ylim, units=units)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None, units="thz"):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(ylim=ylim, units=units)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
A dict with 'distance': a list of distance at which ticks should
be set and 'label': a list of label for each of those ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs.qpoints[0].label
previous_branch = self._bs.branches[0]['name']
for i, c in enumerate(self._bs.qpoints):
if c.label is not None:
tick_distance.append(self._bs.distance[i])
this_branch = None
for b in self._bs.branches:
if b['start_index'] <= i <= b['end_index']:
this_branch = b['name']
break
if c.label != previous_label \
and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
tick_labels.append(label0 + "$\\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
def plot_compare(self, other_plotter, units="thz"):
"""
Plot two band structures for comparison: one in red, the other in blue.
The two band structures need to be defined on the same symmetry lines,
and the distances between symmetry lines are taken from the band
structure used to build this PhononBSPlotter.
Args:
other_plotter: another PhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
"""
u = freq_units(units)
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig['distances']) != len(data['distances']):
raise ValueError('The two objects are not compatible.')
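# The reference band structure (self) is drawn in blue by get_plot below; the
# other plotter's frequencies are then overlaid in red on the same distances.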
plt = self.get_plot(units=units)
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[data['frequency'][d][i][j] * u.factor
for j in range(len(data_orig['distances'][d]))],
'r-', linewidth=band_linewidth)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
# get labels and lines
labels = {}
for q in self._bs.qpoints:
if q.label:
labels[q.label] = q.frac_coords
lines = []
for b in self._bs.branches:
lines.append([self._bs.qpoints[b['start_index']].frac_coords,
self._bs.qpoints[b['end_index']].frac_coords])
plot_brillouin_zone(self._bs.lattice_rec, lines=lines, labels=labels)
class ThermoPlotter:
"""
Plotter for thermodynamic properties obtained from phonon DOS.
If the structure corresponding to the DOS is provided, it will be used to extract the formula unit
and the plots are given in units of mol instead of mol per unit cell.
"""
def __init__(self, dos, structure=None):
"""
Args:
dos: A PhononDos object.
structure: A Structure object corresponding to the structure used for the calculation.
"""
self.dos = dos
self.structure = structure
def _plot_thermo(self, func, temperatures, factor=1, ax=None, ylabel=None, label=None, ylim=None, **kwargs):
"""
Plots a thermodynamic property for a generic function from a PhononDos instance.
Args:
func: the thermodynamic function to be used to calculate the property
temperatures: a list of temperatures
factor: a multiplicative factor applied to the thermodynamic property calculated. Used to change
the units.
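For instance, plot_internal_energy and plot_helmholtz_free_energy below
pass factor=1e-3 to convert J/mol to kJ/mol.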
ax: matplotlib :class:`Axes` or None if a new figure should be created.
ylabel: label for the y axis
label: label of the plot
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
values = []
for t in temperatures:
values.append(func(t, structure=self.structure) * factor)
ax.plot(temperatures, values, label=label, **kwargs)
if ylim:
ax.set_ylim(ylim)
ax.set_xlim((np.min(temperatures), np.max(temperatures)))
ylim = plt.ylim()
if ylim[0] < 0 < ylim[1]:
plt.plot(plt.xlim(), [0, 0], 'k-', linewidth=1)
ax.set_xlabel(r"$T$ (K)")
if ylabel:
ax.set_ylabel(ylabel)
return fig
@add_fig_kwargs
def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the constant volume specific heat C_v in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$C_v$ (J/K/mol)"
else:
ylabel = r"$C_v$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig
@add_fig_kwargs
def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational entropy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$S$ (J/K/mol)"
else:
ylabel = r"$S$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.entropy, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig
@add_fig_kwargs
def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational internal energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta E$ (kJ/mol)"
else:
ylabel = r"$\Delta E$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.internal_energy, temperatures, ylabel=ylabel, ylim=ylim,
factor=1e-3, **kwargs)
return fig
@add_fig_kwargs
def plot_helmholtz_free_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational contribution to the Helmholtz free energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta F$ (kJ/mol)"
else:
ylabel = r"$\Delta F$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylabel=ylabel, ylim=ylim,
factor=1e-3, **kwargs)
return fig
@add_fig_kwargs
def plot_thermodynamic_properties(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots all the thermodynamic properties in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
mol = "" if self.structure else "-c"
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel="Thermodynamic properties", ylim=ylim,
label=r"$C_v$ (J/K/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.entropy, temperatures, ylim=ylim, ax=fig.axes[0],
label=r"$S$ (J/K/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.internal_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=1e-3,
label=r"$\Delta E$ (kJ/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=1e-3,
label=r"$\Delta F$ (kJ/mol{})".format(mol), **kwargs)
fig.axes[0].legend(loc="best")
return fig
| mit |
BIRDY-obspm/DOCKing_System | Module/Simulation/Trajectory/pykep/PyKEP/trajopt/_pl2pl_N_impulses.py | 5 | 8898 | from PyGMO.problem import base as base_problem
from PyKEP.core import epoch, DAY2SEC, lambert_problem, propagate_lagrangian, SEC2DAY, AU, ic2par
from PyKEP.planet import jpl_lp
from math import pi, cos, sin, log, acos
from scipy.linalg import norm
class pl2pl_N_impulses(base_problem):
"""
This class is a PyGMO (http://esa.github.io/pygmo/) problem representing a single leg transfer
between two planets, allowing up to a maximum number of impulsive Deep Space Manoeuvres.
The decision vector is::
[t0,T] + [alpha,u,v,V_inf]*(N-2) +[alpha] + ([tf])
... in the units: [mjd2000, days] + [nd, nd, m/sec, nd] + [nd] + [mjd2000]
Each leg time-of-flight can be decoded as follows, T_n = T log(alpha_n) / \sum_i(log(alpha_i))
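For example, with two legs and alpha values 0.5 and 0.25, log(0.5) contributes one third
of the summed logs, so the legs receive T/3 and 2T/3 of the total time of flight T.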
.. note::
The resulting problem is box-bounded (unconstrained). The resulting trajectory is time-bounded.
"""
def __init__(self,
start=jpl_lp('earth'),
target=jpl_lp('venus'),
N_max=3,
tof=[20., 400.],
vinf=[0., 4.],
phase_free=True,
multi_objective=False,
t0=None
):
"""
prob = PyKEP.trajopt.pl2pl_N_impulses(start=jpl_lp('earth'), target=jpl_lp('venus'), N_max=3, tof=[20., 400.], vinf=[0., 4.], phase_free=True, multi_objective=False, t0=None)
- start: a PyKEP planet defining the starting orbit
- target: a PyKEP planet defining the target orbit
- N_max: maximum number of impulses
- tof: a list containing the box bounds [lower,upper] for the time of flight (days)
- vinf: a list containing the box bounds [lower,upper] for each DV magnitude (km/sec)
- phase_free: when True, no rendezvous condition is enforced and the final orbit will be reached at an optimal true anomaly
- multi_objective: when True, a multi-objective problem is constructed with DV and time of flight as objectives
- t0: launch window defined as a list of two epochs [epoch,epoch]
"""
# Sanity checks
# 1) all planets need to have the same mu_central_body
if (start.mu_central_body != target.mu_central_body):
raise ValueError('Starting and ending PyKEP.planet must have the same mu_central_body')
# 2) Number of impulses must be at least 2
if N_max < 2:
raise ValueError('Number of impulses N is less than 2')
# 3) If phase_free is True, t0 does not make sense
if (t0 is None and not phase_free):
t0 = [epoch(0), epoch(1000)]
if (t0 is not None and phase_free):
raise ValueError('When phase_free is True no t0 can be specified')
# We compute the PyGMO problem dimensions
dim = 2 + 4 * (N_max - 2) + 1 + phase_free
obj_dim = multi_objective + 1
# First we call the constructor for the base PyGMO problem
# As our problem is n dimensional, box-bounded (may be multi-objective), we write
# (dim, integer dim, number of obj, number of con, number of inequality con, tolerance on con violation)
super(pl2pl_N_impulses, self).__init__(dim, 0, obj_dim, 0, 0, 0)
# We then define all class data members
self.start = start
self.target = target
self.N_max = N_max
self.phase_free = phase_free
self.multi_objective = multi_objective
self.__common_mu = start.mu_central_body
# And we compute the bounds
if phase_free:
lb = [start.ref_epoch.mjd2000, tof[0]] + [0.0, 0.0, 0.0, vinf[0] * 1000] * (N_max - 2) + [0.0] + [target.ref_epoch.mjd2000]
ub = [start.ref_epoch.mjd2000 + 2 * start.period * SEC2DAY, tof[1]] + [1.0, 1.0, 1.0, vinf[1] * 1000] * (N_max - 2) + [1.0] + [target.ref_epoch.mjd2000 + 2 * target.period * SEC2DAY]
else:
lb = [t0[0].mjd2000, tof[0]] + [0.0, 0.0, 0.0, vinf[0] * 1000] * (N_max - 2) + [0.0]
ub = [t0[1].mjd2000, tof[1]] + [1.0, 1.0, 1.0, vinf[1] * 1000] * (N_max - 2) + [1.0]
# And we set them
self.set_bounds(lb, ub)
# Objective function
def _objfun_impl(self, x):
# 1 - we 'decode' the chromosome recording the various deep space
# manoeuvre timings (days) in the list T
T = list([0] * (self.N_max - 1))
for i in range(len(T)):
T[i] = log(x[2 + 4 * i])
total = sum(T)
T = [x[1] * time / total for time in T]
# 2 - We compute the starting and ending position
r_start, v_start = self.start.eph(epoch(x[0]))
if self.phase_free:
r_target, v_target = self.target.eph(epoch(x[-1]))
else:
r_target, v_target = self.target.eph(epoch(x[0] + x[1]))
# 3 - We loop across inner impulses
rsc = r_start
vsc = v_start
for i, time in enumerate(T[:-1]):
theta = 2 * pi * x[3 + 4 * i]
phi = acos(2 * x[4 + 4 * i] - 1) - pi / 2
Vinfx = x[5 + 4 * i] * cos(phi) * cos(theta)
Vinfy = x[5 + 4 * i] * cos(phi) * sin(theta)
Vinfz = x[5 + 4 * i] * sin(phi)
# We apply the (i+1)-th impulse
vsc = [a + b for a, b in zip(vsc, [Vinfx, Vinfy, Vinfz])]
rsc, vsc = propagate_lagrangian(
rsc, vsc, T[i] * DAY2SEC, self.__common_mu)
cw = (ic2par(rsc, vsc, self.start.mu_central_body)[2] > pi / 2)
# We now compute the remaining two final impulses
# Lambert arc to reach seq[1]
dt = T[-1] * DAY2SEC
l = lambert_problem(rsc, r_target, dt, self.__common_mu, cw, False)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
DV1 = norm([a - b for a, b in zip(v_beg_l, vsc)])
DV2 = norm([a - b for a, b in zip(v_end_l, v_target)])
DV_others = sum(x[5::4])
if self.f_dimension == 1:
return (DV1 + DV2 + DV_others,)
else:
return (DV1 + DV2 + DV_others, x[1])
def plot(self, x, ax=None):
"""
ax = prob.plot(x, ax=None)
- x: encoded trajectory
- ax: matplotlib axis where to plot. If None figure and axis will be created
- [out] ax: matplotlib axis where to plot
Plots the trajectory represented by a decision vector x on the 3d axis ax
Example::
ax = prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
if ax is None:
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
axis.scatter(0, 0, 0, color='y')
# 1 - we 'decode' the chromosome recording the various deep space
# manoeuvre timings (days) in the list T
T = list([0] * (self.N_max - 1))
for i in range(len(T)):
T[i] = log(x[2 + 4 * i])
total = sum(T)
T = [x[1] * time / total for time in T]
# 2 - We compute the starting and ending position
r_start, v_start = self.start.eph(epoch(x[0]))
if self.phase_free:
r_target, v_target = self.target.eph(epoch(x[-1]))
else:
r_target, v_target = self.target.eph(epoch(x[0] + x[1]))
plot_planet(self.start, t0=epoch(x[0]), color=(0.8, 0.6, 0.8), legend=True, units = AU, ax=axis)
plot_planet(self.target, t0=epoch(x[0] + x[1]), color=(0.8, 0.6, 0.8), legend=True, units = AU, ax=axis)
# 3 - We loop across inner impulses
rsc = r_start
vsc = v_start
for i, time in enumerate(T[:-1]):
theta = 2 * pi * x[3 + 4 * i]
phi = acos(2 * x[4 + 4 * i] - 1) - pi / 2
Vinfx = x[5 + 4 * i] * cos(phi) * cos(theta)
Vinfy = x[5 + 4 * i] * cos(phi) * sin(theta)
Vinfz = x[5 + 4 * i] * sin(phi)
# We apply the (i+1)-th impulse
vsc = [a + b for a, b in zip(vsc, [Vinfx, Vinfy, Vinfz])]
plot_kepler(rsc, vsc, T[
i] * DAY2SEC, self.__common_mu, N=200, color='b', legend=False, units=AU, ax=axis)
rsc, vsc = propagate_lagrangian(
rsc, vsc, T[i] * DAY2SEC, self.__common_mu)
cw = (ic2par(rsc, vsc, self.start.mu_central_body)[2] > pi / 2)
# We now compute the remaining two final impulses
# Lambert arc to reach seq[1]
dt = T[-1] * DAY2SEC
l = lambert_problem(rsc, r_target, dt, self.__common_mu, cw, False)
plot_lambert(
l, sol=0, color='r', legend=False, units=AU, ax=axis, N=200)
plt.show()
return axis
| lgpl-3.0 |
febert/DeepRL | ddpg_working/dashboard.py | 1 | 5607 | #!/usr/bin/env python
import matplotlib
matplotlib.use("Agg")
import warnings
warnings.filterwarnings("ignore", module="matplotlib")
from IPython.display import display
from ipywidgets import widgets
import matplotlib.pyplot as plt
import time
import thread
import numpy as np
import json
import shutil
import cStringIO
import webbrowser
import os
import subprocess
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('exdir',os.getenv('DB_EXDIR',''),'path containing output folders')
flags.DEFINE_boolean('browser',os.getenv('DB_BROWSER','True')=='True','create new jupyter browser tabs')
PORT_DB = "8007"
PORT_IP = "8008"
PORT_TB = "8009"
def main():
print('Experiment root folder at: ' + FLAGS.exdir)
free_port(PORT_DB)
free_port(PORT_IP)
free_port(PORT_TB)
subprocess.Popen(['jupyter','notebook', '--no-browser', '--port='+PORT_IP, FLAGS.exdir])
scriptdir = os.path.dirname(__file__)
browser = "" if FLAGS.browser else "--no-browser"
os.environ["DB_EXDIR"] = FLAGS.exdir
os.environ["DB_BROWSER"] = 'True' if FLAGS.browser else 'False'
os.system('jupyter notebook --port='+ PORT_DB + ' ' + scriptdir + ' ' + browser)
def free_port(port):
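# Best-effort cleanup: ask lsof for any PID bound to the given port and send it
# SIGTERM, trying both the bare command name and the common /usr/sbin/lsof path.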
import signal
for lsof in ["lsof","/usr/sbin/lsof"]:
try:
out = subprocess.check_output([lsof,"-t","-i:" + port])
for l in out.splitlines():
pid = int(l)
os.kill(pid,signal.SIGTERM)
# print("Killed process " + str(pid) + " to free port " + port)
break
except subprocess.CalledProcessError:
pid = -1
except OSError:
pass
# TODO: remove ?
def load(pattern):
import glob
import numpy as np
data = [np.load(f) for f in glob.glob(FLAGS.exdir+'/'+pattern)]
return data
def dashboard(max=10):
main_view = widgets.VBox()
display(main_view)
def loop():
views = {}
while True:
try:
views2 = {}
todisplay = []
i = 0
dirs = os.listdir(FLAGS.exdir)
dirs.sort(reverse=True)
for e in dirs:
if i == max: break
i = i+1
v = views[e] if e in views else ExpView(e)
v.update()
todisplay = todisplay + [v.view]
views2[e] = v
main_view.children = todisplay
views = views2
time.sleep(1.)
except Exception as ex:
print(ex)
pass
thread.start_new_thread(loop,())
class ExpView:
def __init__(self, name):
self.outdir = FLAGS.exdir + '/' + name
style_hlink = '<style>.hlink{padding: 5px 10px 5px 10px;display:inline-block;}</style>'
bname = widgets.HTML(style_hlink +
'<a class=hlink target="_blank"'+
'href="http://localhost:'+ PORT_IP +'/tree/'+ name +'"> '+
name + ' </a> ')
# self.env = widgets.Button()
self.run_status = widgets.Button()
killb = widgets.Button(description='kill')
delb = widgets.Button(description='delete')
killb.on_click(lambda _,self=self: exp_kill(self.outdir))
def delf(_,self=self):
self.delete()
exp_delete(self.outdir)
delb.on_click(delf)
self.plot = widgets.Image(format='png')
tbb = widgets.Button(description='tensorboard')
tbbb = widgets.HTML(style_hlink+'<a class=hlink target="_blank" href="http://localhost:'+ PORT_TB +'"> (open) </a> ')
def ontb(_,self=self):
free_port(PORT_TB)
subprocess.Popen(['tensorboard','--port', PORT_TB, '--logdir', self.outdir])
if FLAGS.browser:
webbrowser.open_new_tab('http://localhost:'+ PORT_TB)
tbb.on_click(ontb)
self.bar = widgets.HBox((bname, self.run_status,tbb,tbbb,killb,delb))
self.view = widgets.VBox((self.bar,self.plot,widgets.HTML('<br><br>')))
self.th_stop = False
def loop_fig(self=self):
while not self.th_stop:
try:
# update plot
try:
x = np.load(self.outdir+'/returns.npy')
except:
x = np.zeros([1,2])
f,ax = plt.subplots()
f.set_size_inches((15,2.5))
f.set_tight_layout(True)
ax.plot(x[:,0],x[:,1])
#ax.plot(i,r)
sio = cStringIO.StringIO()
f.savefig(sio, format='png',dpi=60)
self.plot.value = sio.getvalue()
sio.close()
plt.close(f)
except:
pass
self.th = thread.start_new_thread(loop_fig,())
def update(self):
try:
# update labels
x = xread(self.outdir)
job = x.get('job',False)
if job:
rt = 'job'
jid = x['job_id']
try:
out = subprocess.check_output("squeue --job {} -o %%T".format(jid).split(' '),stderr=subprocess.STDOUT)
rs = out[6:]
if rs == "": rs = "dead"
except:
rs = "dead"
else:
rt = 'local'
rs = x['run_status']
# flags = x.get('__flags') or x.get('flags')
# self.env.description = flags.get('env','')
self.run_status.description = rt + ": " + rs
except:
pass
def delete(self):
self.th_stop = True
def xwrite(path,data):
with open(path+'/ezex.json','w+') as f:
json.dump(data,f)
def xread(path):
with open(path+'/ezex.json') as f:
return json.load(f)
def exp_kill(outdir):
''' try to stop experiment slurm job with destination <outdir> '''
try:
x = xread(outdir)
jid = x['job_id']
cmd = 'scancel '+str(jid)
subprocess.check_output(cmd,shell=True)
except Exception:
return False
def exp_delete(outdir):
exp_kill(outdir)
shutil.rmtree(outdir,ignore_errors=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
pnedunuri/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
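# OneClassSVM.predict returns +1 for points inside the learned frontier and -1 for
# outliers, so the counts below tally the mispredictions for each of the three sets.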
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
jcchin/MagnePlane | src/hyperloop/Python/pod/pod_group.py | 4 | 9844 | """
Group for Pod components containing the following components:
Cycle Group, Pod Mach (Aero), DriveTrain group, Geometry, Levitation group, and Pod Mass
"""
from openmdao.api import Component, Group, Problem, IndepVarComp
from hyperloop.Python.pod.drag import Drag
from hyperloop.Python.pod.pod_mass import PodMass
from hyperloop.Python.pod.drivetrain.drivetrain import Drivetrain
from hyperloop.Python.pod.pod_mach import PodMach
from hyperloop.Python.pod.cycle.cycle_group import Cycle
from hyperloop.Python.pod.pod_geometry import PodGeometry
from hyperloop.Python.pod.magnetic_levitation.levitation_group import LevGroup
from openmdao.api import Newton, ScipyGMRES
from openmdao.units.units import convert_units as cu
import numpy as np
import matplotlib.pylab as plt
class PodGroup(Group):
"""TODOs
Params
------
pod_mach : float
Vehicle mach number (unitless)
tube_pressure : float
Tube total pressure (Pa)
tube_temp : float
Tube total temperature (K)
comp.map.PRdes : float
Pressure ratio of compressor (unitless)
nozzle.Ps_exhaust : float
Exit pressure of nozzle (psi)
comp_inlet_area : float
Inlet area of compressor. (m**2)
des_time : float
time until design power point (h)
time_of_flight : float
total mission time (h)
motor_max_current : float
max motor phase current (A)
motor_LD_ratio : float
length to diameter ratio of motor (unitless)
motor_oversize_factor : float
scales peak motor power by this figure
inverter_efficiency : float
power out / power in (unitless)
battery_cross_section_area : float
cross_sectional area of battery used to compute length (cm^2)
n_passengers : float
Number of passengers per pod. Default value is 28
A_payload : float
Cross sectional area of passenger compartment. Default value is 2.72
vel_b : float
desired breakpoint levitation speed (m/s)
h_lev : float
Levitation height. Default value is .01
vel : float
desired magnetic drag speed (m/s)
Returns
-------
nozzle.Fg : float
Nozzle thrust (lbf)
inlet.F_ram : float
Ram drag (lbf)
nozzle.Fl_O:tot:T : float
Total temperature at nozzle exit (degR)
nozzle.Fl_O:stat:W : float
Total mass flow rate at nozzle exit (lbm/s)
A_tube : float
will return optimal tunnel area based on pod Mach number
S : float
Platform area of the pod
mag_drag : float
magnetic drag from levitation system (N)
total_pod_mass : float
Pod Mass (kg)
References
----------
.. [1] Friend, Paul. Magnetic Levitation Train Technology 1. Thesis.
Bradley University, 2004. N.p.: n.p., n.d. Print.
"""
def __init__(self):
super(PodGroup, self).__init__()
self.add('drag', Drag(), promotes = ['pod_mach', 'Cd'])
self.add('cycle', Cycle(), promotes=['comp.map.PRdes', 'nozzle.Ps_exhaust', 'comp_inlet_area',
'nozzle.Fg', 'inlet.F_ram', 'nozzle.Fl_O:tot:T', 'nozzle.Fl_O:stat:W',
'tube_pressure', 'tube_temp'])
self.add('pod_mach', PodMach(), promotes=['A_tube'])
self.add('drivetrain', Drivetrain(), promotes=['des_time', 'time_of_flight', 'motor_max_current', 'motor_LD_ratio',
'inverter_efficiency', 'motor_oversize_factor', 'battery_cross_section_area'])
self.add('pod_geometry', PodGeometry(), promotes=['A_payload', 'n_passengers', 'S', 'L_pod'])
self.add('levitation_group', LevGroup(), promotes=['vel_b', 'h_lev', 'vel', 'mag_drag', 'total_pod_mass'])
self.add('pod_mass', PodMass())
# Connects pod group level variables to downstream components
self.connect('pod_mach', ['pod_mach.M_pod', 'cycle.pod_mach'])
self.connect('tube_pressure', 'pod_mach.p_tube')
self.connect('tube_temp', 'pod_mach.T_ambient')
self.connect('n_passengers', 'pod_mass.n_passengers')
self.connect('comp_inlet_area', 'pod_mach.comp_inlet_area')
self.connect('L_pod', ['pod_mach.L', 'pod_mass.pod_len', 'levitation_group.l_pod'])
# Connects cycle group outputs to downstream components
self.connect('cycle.comp_len', 'pod_geometry.L_comp')
self.connect('cycle.comp_mass', 'pod_mass.comp_mass')
self.connect('cycle.comp.power', 'drivetrain.design_power')
self.connect('cycle.comp.trq', 'drivetrain.design_torque')
self.connect('cycle.comp.Fl_O:stat:area', 'pod_geometry.A_duct')
# Connects Drivetrain outputs to downstream components
self.connect('drivetrain.battery_mass', 'pod_mass.battery_mass')
self.connect('drivetrain.battery_length', 'pod_geometry.L_bat')
self.connect('drivetrain.motor_mass', 'pod_mass.motor_mass')
self.connect('drivetrain.motor_length', 'pod_geometry.L_motor')
# Connects Pod Geometry outputs to downstream components
self.connect('pod_geometry.A_pod', 'pod_mach.A_pod')
self.connect('pod_geometry.D_pod', ['pod_mass.podgeo_d', 'levitation_group.d_pod'])
self.connect('pod_geometry.BF', 'pod_mass.BF')
# Connects Levitation outputs to downstream components
# Connects Pod Mass outputs to downstream components
self.connect('pod_mass.pod_mass', 'levitation_group.m_pod')
if __name__ == "__main__":
prob = Problem()
root = prob.root = Group()
root.add('Pod', PodGroup())
params = (('comp_inlet_area', 2.3884, {'units': 'm**2'}),
('comp_PR', 6.0, {'units': 'unitless'}),
('PsE', 0.05588, {'units': 'psi'}),
('des_time', 1.0),
('time_of_flight', 1.0, {'units' : 'h'}),
('motor_max_current', 800.0),
('motor_LD_ratio', 0.83),
('motor_oversize_factor', 1.0),
('inverter_efficiency', 1.0),
('battery_cross_section_area', 15000.0, {'units': 'cm**2'}),
('n_passengers', 28.0),
('A_payload', 2.72),
('pod_mach_number', .8, {'units': 'unitless'}),
('tube_pressure', 850., {'units': 'Pa'}),
('tube_temp', 320., {'units': 'K'}),
('vel_b', 23.0, {'units': 'm/s'}),
('h_lev', 0.01, {'units': 'm'}),
('vel', 350.0, {'units': 'm/s'}))
prob.root.add('des_vars', IndepVarComp(params))
prob.root.connect('des_vars.comp_inlet_area', 'Pod.comp_inlet_area')
prob.root.connect('des_vars.comp_PR', 'Pod.comp.map.PRdes')
prob.root.connect('des_vars.PsE', 'Pod.nozzle.Ps_exhaust')
prob.root.connect('des_vars.des_time', 'Pod.des_time')
prob.root.connect('des_vars.time_of_flight', 'Pod.time_of_flight')
prob.root.connect('des_vars.motor_max_current', 'Pod.motor_max_current')
prob.root.connect('des_vars.motor_LD_ratio', 'Pod.motor_LD_ratio')
prob.root.connect('des_vars.motor_oversize_factor', 'Pod.motor_oversize_factor')
prob.root.connect('des_vars.inverter_efficiency', 'Pod.inverter_efficiency')
prob.root.connect('des_vars.battery_cross_section_area', 'Pod.battery_cross_section_area')
prob.root.connect('des_vars.n_passengers', 'Pod.n_passengers')
prob.root.connect('des_vars.A_payload', 'Pod.A_payload')
prob.root.connect('des_vars.pod_mach_number', 'Pod.pod_mach')
prob.root.connect('des_vars.tube_pressure', 'Pod.tube_pressure')
prob.root.connect('des_vars.tube_temp', 'Pod.tube_temp')
prob.root.connect('des_vars.vel_b', 'Pod.vel_b')
prob.root.connect('des_vars.h_lev', 'Pod.h_lev')
prob.root.connect('des_vars.vel', 'Pod.vel')
prob.setup()
prob.root.list_connections()
# A_comp = np.linspace(1, 2.5, num = 50)
# L = np.zeros((1, 50))
#print(len(A_comp))
#print(len(L))
#for i in range(len(A_comp)):
# prob['des_vars.comp_inlet_area'] = A_comp[i]
# prob.run()
# L[0, i] = prob['Pod.pod_geometry.L_pod']
prob.run()
# plt.plot(A_comp, L[0, :])
# plt.show()
#prob.run()
print('\n')
print('total pressure %f' % (prob['Pod.cycle.FlowPathInputs.Pt']))
print('total temp %f' % prob['Pod.cycle.FlowPathInputs.Tt'])
print('mass flow %f' % prob['Pod.cycle.FlowPathInputs.m_dot'])
print('\n')
print('compressor mass %f' % prob['Pod.cycle.comp_mass'])
print('compressor power %f' % prob['Pod.cycle.comp.power'])
print('compressor trq %f' % prob['Pod.cycle.comp.trq'])
print('ram drag %f' % prob['Pod.inlet.F_ram'])
print('nozzle total temp %f' % prob['Pod.nozzle.Fl_O:tot:T'])
print('\n')
print('battery length %f' % prob['Pod.drivetrain.battery_length'])
print('battery volume %f' % prob['Pod.drivetrain.battery_volume'])
print('motor length %f' % prob['Pod.drivetrain.motor_length'])
print('battery mass %f' % prob['Pod.drivetrain.battery_mass'])
print('motor mass %f' % prob['Pod.drivetrain.motor_mass'])
print('\n')
print('pod length %f' % prob['Pod.L_pod'])
print('pod area %f' % prob['Pod.S'])
print('pod cross section %f' % prob['Pod.pod_geometry.A_pod'])
print('pod diameter %f' % prob['Pod.pod_geometry.D_pod'])
print('\n')
print('Tube Area %f' % prob['Pod.A_tube'])
print('\n')
print('pod mass w/o magnets %f' % prob['Pod.pod_mass.pod_mass'])
print('\n')
print('magnetic drag %f' % prob['Pod.mag_drag'])
print('mag mass %f' % prob['Pod.levitation_group.Mass.m_mag'])
print('total pod mass %f' % prob['Pod.total_pod_mass'])
| apache-2.0 |
RPGOne/Skynet | scipy-2017-sklearn-master/notebooks/figures/plot_pca.py | 5 | 3131 | from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
def plot_pca_illustration():
rnd = np.random.RandomState(5)
X_ = rnd.normal(size=(300, 2))
X_blob = np.dot(X_, rnd.normal(size=(2, 2))) + rnd.normal(size=2)
pca = PCA()
pca.fit(X_blob)
X_pca = pca.transform(X_blob)
S = X_pca.std(axis=0)
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
axes = axes.ravel()
axes[0].set_title("Original data")
axes[0].scatter(X_blob[:, 0], X_blob[:, 1], c=X_pca[:, 0], linewidths=0,
s=60, cmap='viridis')
axes[0].set_xlabel("feature 1")
axes[0].set_ylabel("feature 2")
axes[0].arrow(pca.mean_[0], pca.mean_[1], S[0] * pca.components_[0, 0],
S[0] * pca.components_[0, 1], width=.1, head_width=.3,
color='k')
axes[0].arrow(pca.mean_[0], pca.mean_[1], S[1] * pca.components_[1, 0],
S[1] * pca.components_[1, 1], width=.1, head_width=.3,
color='k')
axes[0].text(-1.5, -.5, "Component 2", size=14)
axes[0].text(-4, -4, "Component 1", size=14)
axes[0].set_aspect('equal')
axes[1].set_title("Transformed data")
axes[1].scatter(X_pca[:, 0], X_pca[:, 1], c=X_pca[:, 0], linewidths=0,
s=60, cmap='viridis')
axes[1].set_xlabel("First principal component")
axes[1].set_ylabel("Second principal component")
axes[1].set_aspect('equal')
axes[1].set_ylim(-8, 8)
pca = PCA(n_components=1)
pca.fit(X_blob)
X_inverse = pca.inverse_transform(pca.transform(X_blob))
axes[2].set_title("Transformed data w/ second component dropped")
axes[2].scatter(X_pca[:, 0], np.zeros(X_pca.shape[0]), c=X_pca[:, 0],
linewidths=0, s=60, cmap='viridis')
axes[2].set_xlabel("First principal component")
axes[2].set_aspect('equal')
axes[2].set_ylim(-8, 8)
axes[3].set_title("Back-rotation using only first component")
axes[3].scatter(X_inverse[:, 0], X_inverse[:, 1], c=X_pca[:, 0],
linewidths=0, s=60, cmap='viridis')
axes[3].set_xlabel("feature 1")
axes[3].set_ylabel("feature 2")
axes[3].set_aspect('equal')
axes[3].set_xlim(-8, 4)
axes[3].set_ylim(-8, 4)
def plot_pca_whitening():
rnd = np.random.RandomState(5)
X_ = rnd.normal(size=(300, 2))
X_blob = np.dot(X_, rnd.normal(size=(2, 2))) + rnd.normal(size=2)
pca = PCA(whiten=True)
pca.fit(X_blob)
X_pca = pca.transform(X_blob)
fig, axes = plt.subplots(1, 2, figsize=(10, 10))
axes = axes.ravel()
axes[0].set_title("Original data")
axes[0].scatter(X_blob[:, 0], X_blob[:, 1], c=X_pca[:, 0], linewidths=0, s=60, cmap='viridis')
axes[0].set_xlabel("feature 1")
axes[0].set_ylabel("feature 2")
axes[0].set_aspect('equal')
axes[1].set_title("Whitened data")
axes[1].scatter(X_pca[:, 0], X_pca[:, 1], c=X_pca[:, 0], linewidths=0, s=60, cmap='viridis')
axes[1].set_xlabel("First principal component")
axes[1].set_ylabel("Second principal component")
axes[1].set_aspect('equal')
axes[1].set_xlim(-3, 4)
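# Usage sketch (added for illustration; not part of the original module). Both helpers
# draw onto new matplotlib figures, so a typical interactive session would be:
#
#     plot_pca_illustration()
#     plot_pca_whitening()
#     plt.show()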
| bsd-3-clause |
pombo-lab/gamtools | lib/gamtools/qc/fastqc.py | 1 | 5995 | """
====================
The qc.fastqc module
====================
The qc.fastqc module contains functions for parsing fastqc output files.
Code in this module was shamelessly stolen from
https://code.google.com/p/bioinformatics-misc/source/browse/trunk/fastqc_to_pgtable.py?spec=svn93&r=93
"""
import os
import numpy as np
import pandas as pd
def fastqc_data_file(input_fastq):
"""Given an input fastq file, return the name fastqc will use
for its output file.
:param str input_fastq: Path to a fastq file.
:returns: Path to fastqc output files.
"""
base_folder = input_fastq.split('.')[0]
#base_folder = '.'.join(input_fastq.split('.')[:-1])
fastqc_folder = base_folder + '_fastqc'
return os.path.join(fastqc_folder, 'fastqc_data.txt')
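# Illustrative example (hypothetical filename): for an input of 'sample1.fastq',
# base_folder is 'sample1', so on POSIX paths the function returns:
#
#     >>> fastqc_data_file('sample1.fastq')
#     'sample1_fastqc/fastqc_data.txt'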
def parse_module(fastqc_module):
"""
Parse a fastqc module from the table format to a line format (list).
Input is list containing the module. One list-item per line. E.g.:
fastqc_module= [
'>>Per base sequence quality pass',
'#Base Mean Median Lower Quartile Upper Quartile 10th Percentile 90th Percentile',
'1 36.34 38.0 35.0 39.0 31.0 40.0',
'2 35.64 38.0 35.0 39.0 28.0 40.0',
'3 35.50 38.0 34.0 39.0 28.0 40.0',
...
]
Return a list like this, where each sublist after the 1st is a column:
['pass', ['1', '2', '3', ...], ['36.34', '35.64', '35.50', ...], ['40.0', '40.0', '40.0', ...]]
"""
row_list = []
module_header = fastqc_module[0]
module_name = module_header.split('\t')[0]
# Line with module name >>Per base ... pass/warn/fail
row_list.append(module_header.split('\t')[1])
# Handle odd cases:
# Where no table is returned:
if len(fastqc_module) == 1 and module_name == '>>Overrepresented sequences':
return row_list + [[]] * 4
if len(fastqc_module) == 1 and module_name == '>>Kmer Content':
return row_list + [[]] * 5
# Table does not start at the second row:
if module_name == '>>Sequence Duplication Levels':
tot_dupl = fastqc_module[1].split('\t')[1]
row_list.append(tot_dupl)
del fastqc_module[1]
# Convert table to list of lists:
tbl = []
for line in fastqc_module[2:]:
tbl.append(line.split('\t'))
# Put each column in a list:
nrows = len(tbl)
ncols = len(tbl[0])
for i in range(0, ncols):
col = []
for j in range(0, nrows):
col.append(tbl[j][i])
row_list.append(col)
return row_list
def is_mono_repeat(kmer):
"""
Return true if kmer represents a mono-nucleotide repeat (e.g. AAAA).
"""
return bool(len(set(kmer)) == 1)
def is_di_repeat(kmer):
"""
Return true if kmer represents a di-nucleotide repeat (e.g. ATATAT).
"""
if not len(set(kmer)) == 2:
return False
return bool(len(set(kmer[::2])) == 1 and len(set(kmer[1::2])) == 1)
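# Quick illustration of the two repeat checks (values are made up):
#
#     >>> is_mono_repeat('AAAA')
#     True
#     >>> is_di_repeat('ATATAT')
#     True
#     >>> is_di_repeat('AATTAA')   # two bases, but not alternating
#     False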
def get_kmer_summary(module):
"""
Calculate the total number of kmers that are either mono- or di- nucleotide
repeats.
"""
kmer_data = parse_module(module)
kmers = kmer_data[1]
counts = list(map(float, kmer_data[3]))
summary_data = {'dinucleotide_repeats': 0,
'mononucleotide_repeats': 0}
for kmer, count in zip(kmers, counts):
if is_mono_repeat(kmer):
summary_data['mononucleotide_repeats'] += count
elif is_di_repeat(kmer):
summary_data['dinucleotide_repeats'] += count
return summary_data
def get_avg_qual(module):
"""
Get the average per-base-pair sequencing quality score.
"""
qual_data = parse_module(module)
qualities, counts = np.array(
list(map(int, qual_data[1]))), np.array(list(map(float, qual_data[2])))
avg = (qualities * counts).sum() / counts.sum()
return {'avg_quality': avg}
def get_sample(filename):
"""
Get the name of the input sample given the fastqc output file path.
"""
return os.path.basename(os.path.dirname(filename))[:-7]
def process_file(filename):
"""
Process a fastqc output file and calculate some summary statistics.
"""
fq_lines = open(filename).readlines()
fq_lines = [x.strip() for x in fq_lines]
fastqc_dict = {}
# Get start and end position of all modules:
mod_start = []
mod_end = []
for i, line in enumerate(fq_lines):
if line == '>>END_MODULE':
mod_end.append(i)
elif line.startswith('>>'):
mod_start.append(i)
else:
pass
# Start processing modules. The first one (Basic Statistics) is handled separately:
for start, end in zip(mod_start[1:], mod_end[1:]):
module = fq_lines[start:end]
module_name = module[0].split('\t')[0]
if module_name == '>>Kmer Content':
fastqc_dict.update(get_kmer_summary(module))
if module_name == '>>Per sequence quality scores':
fastqc_dict.update(get_avg_qual(module))
fastqc_dict['Sample'] = get_sample(filename)
return fastqc_dict
def get_quality_stats(input_fastqc_files):
"""
Iterate over a list of fastqc output files and generate a dataframe
containing summary statistics for each file.
"""
sample_qualities = []
for filename in input_fastqc_files:
sample_qualities.append(process_file(filename))
return pd.DataFrame(sample_qualities)
def write_quality_stats(input_files, output_file):
"""
Iterate over a list of fastqc output files and generate a dataframe
containing summary statistics for each file, then write the result
to disk.
"""
quality_df = get_quality_stats(input_files)
quality_df.to_csv(output_file, sep='\t', index=False)
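# Usage sketch (file names are hypothetical): collect per-sample fastqc reports
# into a single tab-separated summary table.
#
#     write_quality_stats(['s1_fastqc/fastqc_data.txt', 's2_fastqc/fastqc_data.txt'],
#                         'fastqc_summary.tsv')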
def quality_qc_from_doit(dependencies, targets):
"""
Wrapper function to call write_quality_stats from argparse.
"""
assert len(targets) == 1
write_quality_stats(dependencies, targets[0])
| apache-2.0 |
nvoron23/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
HBNLdev/DataStore | db/meta.py | 1 | 8321 | '''refining and describing'''
from db import database as D
from db.utils import text as tU
from db.knowledge import drinking as drK
import pandas as pd
import numpy as np
from sklearn.neighbors import KernelDensity
from datetime import datetime as dt
from numbers import Number
from collections import Counter
import sys
default_sniff_guide = {'category cutoff':20,
'numeric tolerance':0,
'date tolerance':0}
def find_fields(doc,guide):
if not guide:
skip_cols = ['_id']
return [ k for k in doc.keys() if k not in skip_cols ]
else:
pass
def summarize_collection_meta(db,mdb,coll,coll_guide=None):
''' Scan collection and build description of fields.
'''
D.set_db(db)
cc = D.Mdb[coll].find() #collection_cursor
fields = set()
[ fields.update( find_fields(doc,coll_guide) ) for doc in cc ]
D.set_db(mdb)
D.Mdb['summaries'].insert_one({'name':coll,
'fields':list(fields)})
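# Usage sketch (database and collection names are hypothetical): summarise the
# fields of the 'sessions' collection in 'data_db' and store the result in the
# 'summaries' collection of 'meta_db'.
#
#     summarize_collection_meta('data_db', 'meta_db', 'sessions')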
def sniff_field_type(vals,sniff_guide={}):
''' determines type as one of:
- categorical
- numeric
- datetime
- unknown
'''
sg = default_sniff_guide.copy()
sg.update(sniff_guide)
if len(vals) <= sg['category cutoff']:
return 'categorical'
else:
num_ck_cnts = Counter([isinstance(v,Number) for v in vals])
num_cnt = sorted([ (v,k) for k,v in num_ck_cnts.items() ])
if num_cnt[-1][1]:
if len(num_cnt) == 1 or num_cnt[-2][0]/num_cnt[-1][0] < sg['numeric tolerance']:
return 'numeric'
else:
date_ck_cnts = Counter([isinstance(v,dt) for v in vals])
date_cnt = sorted([ (v,k) for k,v in date_ck_cnts.items() ])
if date_cnt[-1][1]:
if len(date_cnt) == 1 or date_cnt[-2][0]/date_cnt[-1][0] < sg['date tolerance']:
return 'datetime'
return 'unknown'
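# Behaviour with the default sniff guide (illustrative values):
#
#     sniff_field_type(['M', 'F'])                        -> 'categorical'  (<= 20 distinct values)
#     sniff_field_type(list(range(100)))                  -> 'numeric'
#     sniff_field_type(['id%d' % i for i in range(100)])  -> 'unknown'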
def try_con(conF,v):
try:
ck = conF(v)
return (ck,True)
except:
return (None,False)
def field_ranges(db,mdb,coll,guide={},sniff_guide={}):
D.set_db(mdb)
summ = D.Mdb['summaries'].find_one({'name':coll})
ranges = {}
D.set_db(db)
for fd in summ['fields']:
skip_col = False; con_flag = False
if 'skip patterns' in guide:
for sp in guide['skip patterns']:
if sp in fd:
skip_col = True
if not skip_col:
Dvals = D.Mdb[coll].distinct(fd)
Ndocs = D.Mdb[coll].find().count() # should have query to skip descriptive docs
if fd in guide:
fgd = guide[fd]
vtype = fgd['type']
if 'converter' in fgd:
Dvals = [ fgd['converter'](v) for v in Dvals ]
con_flag = True
else:
vtype = sniff_field_type(Dvals, sniff_guide )
if vtype in ['categorical','numeric','datetime']:
raw_vals = [d[fd] for d in \
D.Mdb[coll].find({fd:{'$exists':True}},{fd:1}) ]
rawV_Nnan = [ rv for rv in raw_vals if not pd.isnull(rv)]
data_portion = len(rawV_Nnan)/len(raw_vals)
if con_flag:
Cvals_ck = [ try_con(fgd['converter'],v) for v in raw_vals ]
if vtype == 'categorical':
try:
v_counts = sorted([ (v,k) for k,v in Counter(rawV_Nnan).items() ])[:20]
if all([ v.replace('.','').isdigit() for c,v in v_counts if type(v)==str ]):
converter = int
if any([ ( type(v[1]) == str and '.' in v[1] ) or isinstance(v[1],Number)\
for v in v_counts ]):
converter = float
print('num cat conv:',converter)
ranges[fd] = { 'type':'num cat',
'value counts':sorted([ ( converter(v),k ) for k,v in v_counts]),
'data portion':data_portion }
else:
print('cat')
ranges[fd] = {'type':'categorical',
'value counts':[ (v,k) for k,v in v_counts],
'data portion':data_portion}
except:
ranges[fd] = {'skipped':'categorical error'}
#print( fd, set([type(v) for v in raw_vals]), set(raw_vals) )
elif vtype == 'numeric':
pres_vals = [v for v in rawV_Nnan if v]
if not con_flag:
Cvals_ck = [ try_con(float,v) for v in Dvals ]
Cvals = [ cc[0] for cc in Cvals_ck if cc[1] ]
try:
mn = np.mean(Cvals)
med = np.median(Cvals)
std = np.std(Cvals)
low = min(Cvals)
hi = max(Cvals)
ranges[fd] = {'type':'numeric',
'min':low,
'max':hi,
'mean':mn,
'median':med,
'std':std,
'portion':len(Cvals)/Ndocs,
#add bad vals with trace
'Nmin':(low - mn)/std,
'Nmax':(hi - mn)/std,
'Nmedian':(med - mn)/std,
'data portion':data_portion,
}
for pct in [1,5,25,75,95,99]:
pctV = np.percentile(Cvals,pct)
spct = str(pct)
ranges[fd]['p'+spct] = pctV
ranges[fd]['Np'+spct] = (pctV - mn)/std
kernel = KernelDensity(kernel='gaussian',bandwidth=4).fit(np.array(Cvals)[:,np.newaxis])
pdf_xs = np.linspace(ranges[fd]['p1'],ranges[fd]['p99'],150)[:,np.newaxis]
pdf_vals = [np.exp(v) for v in kernel.score_samples(pdf_xs)]
ranges[fd]['pdf_x'] = list( np.squeeze(pdf_xs) )
ranges[fd]['Npdf_x'] = list( np.squeeze((pdf_xs-mn)/std) )
ranges[fd]['pdf'] = list( np.squeeze(pdf_vals) )
except:
D.set_db(mdb)
D.Mdb['errors'].insert_one({'collection':coll,
'field':fd,
#'data':list(set(Cvals)),
'error':str(sys.exc_info()[1])
})
D.set_db(db)
elif vtype == 'datetime':
if not con_flag:
Cvals_ck = [ try_con(float,v) for v in Dvals ]
Cvals = [ cc[0] for cc in Cvals_ck if cc[1] ]
if len(Cvals) == 0:
Cvals = [-1]
try:
ranges[fd] = {'type':'datetime',
'min':min(Cvals),
'max':max(Cvals),
'std':np.std(Cvals),
'portion':len(Cvals)/Ndocs,
'data portion':data_portion, }
except:
D.set_db(mdb)
D.Mdb['errors'].insert_one({'collection':coll,
'field':fd,
#'data':str(list(set(Cvals))),
'error':str(sys.exc_info()[1])
} )
D.set_db(db)
elif vtype == 'unknown':
ranges[fd] = {'type':'unknown',
'values subset':Dvals[:10] }
else:
ranges[fd] = {'type':vtype,
'count':len(Dvals)}
else:
ranges[fd] = {'skipped':'guide'}
D.set_db(mdb)
ranges['collection name'] = coll
D.Mdb['ranges'].insert_one(ranges) | gpl-3.0 |
MPIBGC-TEE/CompartmentalSystems | tests/Test_myOdeResult.py | 1 | 4986 | import unittest
from testinfrastructure.InDirTest import InDirTest
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from sympy import Symbol, Piecewise
from scipy.interpolate import interp1d
from CompartmentalSystems.myOdeResult import get_sub_t_spans, solve_ivp_pwc
class TestmyOdeResult(InDirTest):
def test_solve_ivp_pwc(self):
t = Symbol('t')
ms = [1, -1, 1]
disc_times = [410, 820]
t_start = 0
t_end = 1000
ref_times = np.arange(t_start, t_end, 10)
times = np.array([0, 300, 600, 900])
t_span = (t_start, t_end)
# first build a single rhs
m = Piecewise(
(ms[0], t < disc_times[0]),
(ms[1], t < disc_times[1]),
(ms[2], True)
)
def rhs(t, x): return m.subs({'t': t})
x0 = np.asarray([0])
# To see the differencte between a many piece and
# a one piece solotion
# we deliberately chose a combination
# of method and first step where the
# solver will get lost if it does not restart
# at the disc times.
sol_obj = solve_ivp_pwc(
rhs,
t_span,
x0,
t_eval=times,
method='RK45',
first_step=None
)
def funcmaker(m):
return lambda t, x: m
rhss = [funcmaker(m) for m in ms]
ref_func = interp1d(
[t_start] + list(disc_times) + [t_end],
[0.0, 410.0, 0.0, 180.0]
)
self.assertFalse(
np.allclose(
ref_func(times),
sol_obj.y[0, :],
atol=400
)
)
sol_obj_pw = solve_ivp_pwc(
rhss,
t_span,
x0,
method='RK45',
first_step=None,
disc_times=disc_times
)
self.assertTrue(
np.allclose(
ref_func(sol_obj_pw.t),
sol_obj_pw.y[0, :]
)
)
self.assertTrue(
np.allclose(
ref_func(ref_times),
sol_obj_pw.sol(ref_times)[0, :]
)
)
sol_obj_pw_t_eval = solve_ivp_pwc(
rhss,
t_span,
x0,
t_eval=times,
method='RK45',
first_step=None,
disc_times=disc_times
)
self.assertTrue(
np.allclose(
times,
sol_obj_pw_t_eval.t
)
)
self.assertTrue(
np.allclose(
ref_func(times),
sol_obj_pw_t_eval.y[0, :]
)
)
fig, ax = plt.subplots(
nrows=1,
ncols=1
)
ax.plot(
ref_times,
ref_func(ref_times),
color='blue',
label='ref'
)
ax.plot(
times,
ref_func(times),
'*',
label='ref points',
)
ax.plot(
sol_obj.t,
sol_obj.y[0, :],
'o',
label='pure solve_ivp'
)
ax.plot(
sol_obj_pw.t,
sol_obj_pw.y[0, :],
'+',
label='solve_ivp_pwc',
ls='--'
)
ax.legend()
fig.savefig('inaccuracies.pdf')#, tight_layout=True)
def test_sub_t_spans(self):
disc_times = np.array([2, 3, 4])
t_spans = [
(0, 1), (0, 2), (0, 2.5), (0, 5),
(2, 2), (2, 2.5), (2, 3.2), (2, 5),
(3.2, 4), (3.5, 4.5),
(4, 5), (5, 7)
]
refs = [
[(0, 1), (), (), ()],
[(0, 2), (2, 2), (), ()],
[(0, 2), (2, 2.5), (), ()],
[(0, 2), (2, 3), (3, 4), (4, 5)],
[(2, 2), (2, 2), (), ()],
[(2, 2), (2, 2.5), (), ()],
[(2, 2), (2, 3), (3, 3.2), ()],
[(2, 2), (2, 3), (3, 4), (4, 5)],
[(), (), (3.2, 4), (4, 4)],
[(), (), (3.5, 4), (4, 4.5)],
[(), (), (4, 4), (4, 5)],
[(), (), (), (5, 7)]
]
for t_span, ref in zip(t_spans, refs):
with self.subTest():
sub_t_spans = get_sub_t_spans(t_span, disc_times)
self.assertEqual(sub_t_spans, ref)
###############################################################################
if __name__ == '__main__':
suite = unittest.defaultTestLoader.discover(".", pattern=__file__)
# # Run same tests across 16 processes
# concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(1))
# runner = unittest.TextTestRunner()
# res=runner.run(concurrent_suite)
# # to let the buildbot fail we set the exit value !=0
# # if either a failure or error occurs
# if (len(res.errors)+len(res.failures))>0:
# sys.exit(1)
unittest.main()
| mit |
newemailjdm/scipy | scipy/spatial/_plotutils.py | 53 | 4034 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
was_held = ax.ishold()
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
ptp_bound = points.ptp(axis=0)
ax.set_xlim(points[:,0].min() - 0.1*ptp_bound[0],
points[:,0].max() + 0.1*ptp_bound[0])
ax.set_ylim(points[:,1].min() - 0.1*ptp_bound[1],
points[:,1].max() + 0.1*ptp_bound[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
ax.plot(tri.points[:,0], tri.points[:,1], 'o')
ax.triplot(tri.points[:,0], tri.points[:,1], tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
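# Usage sketch (synthetic data): build a Delaunay triangulation of random points
# and plot it.
#
#     from scipy.spatial import Delaunay
#     points = np.random.rand(30, 2)
#     fig = delaunay_plot_2d(Delaunay(points))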
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
for simplex in hull.simplices:
ax.plot(hull.points[simplex,0], hull.points[simplex,1], 'k-')
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
"""
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
ax.plot(vor.points[:,0], vor.points[:,1], '.')
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
for simplex in vor.ridge_vertices:
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
ax.plot(vor.vertices[simplex,0], vor.vertices[simplex,1], 'k-')
ptp_bound = vor.points.ptp(axis=0)
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
ax.plot([vor.vertices[i,0], far_point[0]],
[vor.vertices[i,1], far_point[1]], 'k--')
_adjust_bounds(ax, vor.points)
return ax.figure
| bsd-3-clause |
zooniverse/aggregation | active_weather/old/learning.py | 1 | 9971 | # try:
# import matplotlib
# matplotlib.use('WXAgg')
# except ImportError:
# pass
from skimage.transform import probabilistic_hough_line
from skimage.feature import canny
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
try:
import Image
except ImportError:
from PIL import Image
from sklearn import neighbors
from sklearn.decomposition import PCA
from sklearn import svm,metrics
import sqlite3
from mnist import MNIST  # missing in the original; the MNIST('/path') loaders below assume the python-mnist package
class Classifier:
def __init__(self):
self.conn = sqlite3.connect('/home/ggdhines/example.db')
def __p_classification__(self,array):
pass
def __set_image__(self,image):
self.image = image
def __normalize_pixels__(self,image,pts):
X,Y = zip(*pts)
max_x = max(X)
min_x = min(X)
max_y = max(Y)
min_y = min(Y)
# print (max_x-min_x),(max_y-min_y)
if (12 <= (max_x-min_x) <= 14) and (12 <= (max_y-min_y) <= 14):
return -2,-2,1.
desired_height = 20.
width_ratio = (max_x-min_x)/desired_height
height_ratio = (max_y-min_y)/desired_height
# calculate the resulting height or width - we want the maximum of these value to be 20
if width_ratio > height_ratio:
# wider than taller
# todo - probably not a digit
width = int(desired_height)
height = int(desired_height*(max_y-min_y)/float(max_x-min_x))
else:
height = int(desired_height)
# print (max_y-max_y)/float(max_x-min_x)
width = int(desired_height*(max_x-min_x)/float(max_y-min_y))
# the easiest way to do the rescaling is to make a subimage which is a box around the digit
# and just get the Python library to do the rescaling - takes care of anti-aliasing for you :)
# obviously this box could contain ink that isn't a part of this digit in particular
# so we just need to be careful about what pixel we extract from the
r = range(min_y,max_y+1)
c = range(min_x,max_x+1)
# print (min_y,max_y+1)
# print (min_x,max_x+1)
# todo - this will probably include noise-pixels, so we need to redo this
template = image[np.ix_(r, c)]
zero_template = np.zeros((len(r),len(c),3))
for (x,y) in pts:
# print (y-min_y,x-min_x),zero_template.shape
# print zero_template[(y-min_y,x-min_x)]
# print image[(y,x)]
zero_template[(y-min_y,x-min_x)] = image[(y,x)]
# cv2.imwrite("/home/ggdhines/aa.png",np.uint8(np.asarray(zero_template)))
# i = Image.fromarray(np.uint8(np.asarray(zero_template)))
# i.save("/home/ggdhines/aa.png")
# assert False
digit_image = Image.fromarray(np.uint8(np.asarray(zero_template)))
# plt.show()
# cv2.imwrite("/home/ggdhines/aa.png",np.uint8(np.asarray(template)))
# raw_input("template extracted")
# continue
digit_image = digit_image.resize((width,height),Image.ANTIALIAS)
# print zero_template.shape
if min(digit_image.size) == 0:
return
# digit_image.save("/home/ggdhines/aa.png")
# digit_image = digit_image.convert('L')
grey_image = np.asarray(digit_image.convert('L'))
# # we need to center subject
# if height == 28:
# # center width wise
#
# y_offset = 0
# else:
#
# x_offset = 0
x_offset = int(28/2 - width/2)
y_offset = int(28/2 - height/2)
digit_array = np.asarray(digit_image)
centered_array = [0 for i in range(28**2)]
for y in range(len(digit_array)):
for x in range(len(digit_array[0])):
# dist1 = math.sqrt(sum([(a-b)**2 for (a,b) in zip(digit_array[y][x],ref1)]))
# if dist1 > 10:
# if digit_array[y][x] > 0.4:
# plt.plot(x+x_offset,y+y_offset,"o",color="blue")
# digit_array[y][x] = digit_array[y][x]/255.
# print digit_array[y][x] - most_common_colour
# dist = math.sqrt(sum([(int(a)-int(b))**2 for (a,b) in zip(digit_array[y][x],most_common_colour)]))
dist = math.sqrt(sum([(int(a)-int(b))**2 for (a,b) in zip(digit_array[y][x],(0,0,0))]))
if dist > 30:#digit_array[y][x] > 10:
centered_array[(y+y_offset)*28+(x+x_offset)] = grey_image[y][x]#/float(darkest_pixel)
else:
centered_array[(y+y_offset)*28+(x+x_offset)] = 0
return centered_array,28
def __identify_digit__(self,image,pts,collect_gold_standard=True):
"""
identify a cluster of pixels, given by pts
image is needed for rescaling
:param image:
:param pts:
:return:
"""
gold_standard_digits = []
digit_probabilities = []
# do dbscan
centered_array,size = self.__normalize_pixels__(image,pts)
algorithm_digit,digit_prob = self.__p_classification__(centered_array)
# print digit_probabilities
digit = ""
if collect_gold_standard:
for y in range(size):
for x in range(size):
p = y*size+x
if centered_array[p] > 0:
print "*",
else:
print " ",
print
print "knn thinks this is a " + str(algorithm_digit) + " with probability " + str(digit_prob)
while digit == "":
digit = raw_input("enter digit - ")
if digit == "o":
gold_standard = -1
else:
gold_standard = int(digit)
else:
gold_standard = None
return gold_standard,algorithm_digit,digit_prob
class NearestNeighbours(Classifier):
def __init__(self):
Classifier.__init__(self)
n_neighbors = 25
mndata = MNIST('/home/ggdhines/Databases/mnist')
self.training = mndata.load_training()
print type(self.training[0][0])
weight = "distance"
self.clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weight)
pca = PCA(n_components=50)
self.T = pca.fit(self.training[0])
reduced_training = self.T.transform(self.training[0])
print sum(pca.explained_variance_ratio_)
# clf.fit(training[0], training[1])
self.clf.fit(reduced_training, self.training[1])
def __p_classification__(self,centered_array):
centered_array = np.asarray(centered_array)
# print centered_array
centered_array = self.T.transform(centered_array)
t = self.clf.predict_proba(centered_array)
digit_prob = max(t[0])
# digit_probabilities.append(max(t[0]))
algorithm_digit = list(t[0]).index(max(t[0]))
return algorithm_digit,digit_prob
class SVM(Classifier):
def __init__(self):
Classifier.__init__(self)
self.classifier = svm.SVC(gamma=0.001,probability=True)
mndata = MNIST('/home/ggdhines/Databases/mnist')
training = mndata.load_training()
self.classifier.fit(training[0], training[1])
class HierarchicalNN(Classifier):
def __init__(self):
Classifier.__init__(self)
cursor = self.conn.cursor()
cursor.execute("select algorithm_classification, gold_classification from cells")
r = cursor.fetchall()
predicted,actual = zip(*r)
confusion_matrix = metrics.confusion_matrix(predicted,actual,labels= np.asarray([0,1,2,3,4,5,6,7,8,9,-1,-2]))
clusters = range(10)
for i in range(10):
for j in range(10):
if i == j:
continue
if confusion_matrix[i][j] > 3:
t = max(clusters[i],clusters[j])
t_old = min(clusters[i],clusters[j])
for k,c in enumerate(clusters):
if c == t_old:
clusters[k] = t
print clusters
mndata = MNIST('/home/ggdhines/Databases/mnist')
training = mndata.load_training()
testing = mndata.load_testing()
labels = training[1]#[clusters[t] for t in training[1]]
pca = PCA(n_components=50)
self.T = pca.fit(training[0])
f = self.T.components_.reshape((50,28,28))
assert False
reduced_training = self.T.transform(training[0])
# starting variance explained
print sum(pca.explained_variance_ratio_)
# map classes
mapped_labels = [clusters[i] for i in labels]
weight = "distance"
n_neighbors = 15
self.clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weight)
self.clf.fit(reduced_training, labels)
test_labels = testing[1]#[clusters[t] for t in testing[1]]
reduced_testing = self.T.transform(testing[0])
predictions = self.clf.predict(reduced_testing)
print sum([1 for (t,p) in zip(test_labels,predictions) if t==p])/float(len(test_labels))
# # filter
# filtered_training = [(training[0][i],training[1][i]) for i in range(len(training[0])) if labels[training[1][i]] == 6]
# data,new_labels = zip(*filtered_training)
# self.T = pca.fit(data)
# reduced_training = self.T.transform(data)
# self.clf.fit(reduced_training, new_labels)
#
# filtered_testing = [(testing[0][i],testing[1][i]) for i in range(len(testing[0])) if labels[testing[1][i]] == 6]
# testing,new_labels = zip(*filtered_testing)
# reduced_testing = self.T.transform(testing)
# predictions = self.clf.predict(reduced_testing)
# print sum([1 for (t,p) in zip(new_labels,predictions) if t==p])/float(len(new_labels)) | apache-2.0 |
agartland/utils | ics/.ipynb_checkpoints/plotting-checkpoint.py | 1 | 12289 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import palettable
import itertools
from hclusterplot import plotHCluster
import re
from myboxplot import myboxplot
import networkx as nx
import seaborn as sns
sns.set(style='darkgrid', palette='muted', font_scale=1.5)
__all__ = ['icsTicks',
'icsTickLabels',
'swarmBox']
from .loading import *
from .analyzing import *
icsTicks = np.log10([0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1])
icsTickLabels = ['0.01', '0.025', '0.05', '0.1', '0.25', '0.5', '1']
# icsTicks = np.log10([0.01, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1])
#icsTickLabels = ['0.01','0.025', '0.05', '0.1','0.2','0.4','0.6','0.8', '1']
def prepPlotDf(jDf, antigen, rxIDs, visitno, tcellsubset='CD4+', column='pvalue', cutoff='pvalue', pAdjust=True, allSubsets=False):
cytokineSubsets = jDf.cytokine.unique()
subset = cytokineSubsets[0].replace('-', '+').split('+')[:-1]
cyCols = [c for c in cytokineSubsets if not c == '-'.join(subset)+'-']
ind = (jDf.tcellsub == tcellsubset) & (jDf.visitno == visitno) & (jDf.TreatmentGroupID.isin(rxIDs))
agInd = (jDf.antigen == antigen) & ind
pvalueDf = pivotPvalues(jDf.loc[agInd], adjust=pAdjust)
"""Use cutoff from HVTN ICS SAP, p < 0.00001"""
responseAlpha = 1e-5
callDf = (pvalueDf < responseAlpha).astype(float)
magDf = jDf.loc[agInd].pivot(index='sample', columns='cytokine', values='mag')
magAdjDf = jDf.loc[agInd].pivot(index='sample', columns='cytokine', values='mag_adj')
bgDf = jDf.loc[agInd].pivot(index='sample', columns='cytokine', values='bg')
"""Positive subsets (to-be plotted) includes all columns unless a cutoff is specified"""
if cutoff == 'mag':
posSubsets = pvalueDf[cyCols].columns[(magDf[cyCols] > 0.00025).any(axis=0)]
elif cutoff == 'mag_adj':
posSubsets = pvalueDf[cyCols].columns[(magAdjDf[cyCols] > 0.00025).any(axis=0)]
elif cutoff == 'bg':
posSubsets = pvalueDf[cyCols].columns[(bgDf[cyCols] > 0).any(axis=0)]
elif cutoff == 'pvalue':
posSubsets = pvalueDf[cyCols].columns[(callDf[cyCols] > 0).any(axis=0)]
else:
posSubsets = pvalueDf[cyCols].columns
if allSubsets:
posSubsets = sorted(cytokineSubsets, key=lambda s: s.count('+'), reverse=True)
else:
posSubsets = sorted(posSubsets, key=lambda s: s.count('+'), reverse=True)
if column == 'pvalue':
plotDf = callDf
elif column == 'mag':
plotDf = magDf.applymap(np.log)
elif column == 'mag_adj':
plotDf = magAdjDf.applymap(np.log)
elif column == 'bg':
plotDf = bgDf.applymap(np.log)
"""Give labels a more readable look"""
plotDf = plotDf.rename_axis(cytokineSubsetLabel, axis=1)
posSubsets = list(map(cytokineSubsetLabel, posSubsets))
return plotDf[posSubsets]
def plotPolyBP(jDf,
antigen,
rxIDs,
visitno,
tcellsubset='CD4+',
column='pvalue', cutoff='pvalue',
pAdjust=True,
allSubsets=False, plotSubsets=None, returnPlotSubsets=False):
if plotSubsets is None:
plotDf = prepPlotDf(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=allSubsets)
posSubsets = plotDf.columns
else:
plotDf = prepPlotDf(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=True)
posSubsets = plotSubsets
cbt = np.log([0.0001, 0.00025, 0.0005, 0.001, 0.002, 0.004, 0.006, 0.008, 0.01])
cbtl = ['0.01', '0.025', '0.05', '0.1', '0.2', '0.4', '0.6', '0.8', '1']
plt.clf()
plotDf = pd.DataFrame(plotDf.stack().reset_index())
plotDf = plotDf.set_index('sample')
plotDf = plotDf.join(ptidDf[['TreatmentGroupID', 'TreatmentGroupName']], how='left').sort_values(by='TreatmentGroupID')
if column == 'mag' or column == 'mag_adj':
plotDf[0].loc[(plotDf[0] < np.log(0.00025)) | plotDf[0].isnull()] = np.log(0.00025)
yl = np.log([0.0002, 0.01])
elif column == 'bg':
plotDf[0].loc[(plotDf[0] < np.log(0.00001)) | plotDf[0].isnull()] = np.log(0.00001)
yl = np.log([0.00001, 0.01])
else:
print('Must specify mag, mag_adj or bg (not %s)' % column)
axh = plt.subplot(111)
sns.boxplot(x='cytokine', y=0, data=plotDf, hue='TreatmentGroupName', fliersize=0, ax=axh, order=posSubsets)
sns.stripplot(x='cytokine', y=0, data=plotDf, hue='TreatmentGroupName', jitter=True, ax=axh, order=posSubsets)
plt.yticks(cbt, cbtl)
plt.ylim(yl)
plt.xticks(list(range(len(posSubsets))), posSubsets, fontsize='large', fontname='Consolas')
plt.ylabel('% cytokine expressing cells')
handles, labels = axh.get_legend_handles_labels()
l = plt.legend(handles[len(rxIDs):], labels[len(rxIDs):], loc='upper right')
if returnPlotSubsets:
return axh, posSubsets
else:
return axh
def plotPolyHeat(jDf, antigen, rxIDs, visitno, tcellsubset='CD4+', cluster=False, column='pvalue', cutoff='pvalue', pAdjust=True, allSubsets=False):
plotDf = prepPlotDf(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=allSubsets)
posSubsets = plotDf.columns
plotDf = plotDf.join(ptidDf[['TreatmentGroupID', 'TreatmentGroupName']], how='left').sort_values(by='TreatmentGroupID')
cbt = np.log([0.0001, 0.00025, 0.0005, 0.001, 0.002, 0.004, 0.006, 0.008, 0.01])
cbtl = ['0.01', '0.025', '0.05', '0.1', '0.2', '0.4', '0.6', '0.8', '1']
if cluster:
clusterBool = [True, True]
else:
clusterBool = [False, False]
if column == 'pvalue':
vRange = [0, 2]
elif column == 'mag':
vRange = np.log([0.0001, 0.01])
elif column == 'mag_adj':
vRange = np.log([0.0001, 0.01])
elif column == 'bg':
vRange = np.log([0.0001, 0.01])
#valVec = tmp[posSubsets].values.flatten()
#vRange = [log(valVec[valVec>0].min()),log(valVec.max())]
ptidInd, cyColInd, handles = plotHCluster(plotDf[posSubsets],
row_labels=plotDf.TreatmentGroupID,
cmap=palettable.colorbrewer.sequential.YlOrRd_9.mpl_colormap,
yTickSz=None,
xTickSz='large',
clusterBool=clusterBool,
vRange=vRange)
if column == 'pvalue':
handles['cb'].remove()
else:
handles['cb'].set_ticks(cbt)
handles['cb'].set_ticklabels(cbtl)
handles['cb'].set_label('% cells')
for xh in handles['xlabelsL']:
xh.set_rotation(0)
handles['heatmapAX'].grid(b=None)
#handles['heatmapGS'].tight_layout(handles['fig'], h_pad=0.1, w_pad=0.5)
return handles
def plotResponsePattern(jDf, antigen, rxIDs, visitno, tcellsubset='CD4+', column='pvalue', cluster=False, cutoff='pvalue', pAdjust=True, boxplot=False, allSubsets=False):
if column == 'pvalue' and boxplot:
boxplot = False
print('Forced heatmap for p-value plotting.')
if boxplot:
axh = plotPolyBP(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=allSubsets)
else:
axh = plotPolyHeat(jDf, antigen, rxIDs, visitno, tcellsubset=tcellsubset, cluster=cluster, column=column, cutoff=cutoff, pAdjust=pAdjust, allSubsets=allSubsets)
return axh
def _szscale(vec, mx=np.inf, mn=1):
"""Normalize values of vec to [mn, mx] interval
such that sz ratios remain representative."""
factor = mn/np.nanmin(vec)
vec = vec*factor
vec[vec > mx] = mx
vec[np.isnan(vec)] = mn
return vec
def plotPolyFunNetwork(cdf):
"""This visualization isn't promising, but its also the start to how
I'd think about defining a pairwise sample distance matrix. Instead
of considering each subset as independent they could be related by their
distance on this graph (just the sum of the binayr vector representation),
then the distance would be somekind of earth over's distance between the two graphs"""
binSubsets = np.concatenate([m[None, :] for m in map(_subset2vec, cdf.cytokine.unique())], axis=0)
nColors = (np.unique(binSubsets.sum(axis=1)) > 0).sum()
cmap = sns.light_palette('red', as_cmap=True, n_colors=nColors)
freqDf = cdf.groupby('cytokine')['mag'].agg(np.mean)
freqDf = freqDf.drop(vec2subset([0]*len(binSubsets)), axis=0)
g = nx.Graph()
for ss,f in freqDf.iteritems():
g.add_node(ss, freq=f, fscore=subset2vec(ss).sum())
for ss1, ss2 in itertools.product(freqDf.index, freqDf.index):
if np.abs(subset2vec(ss1) - subset2vec(ss2)).sum() <= 1:
g.add_edge(ss1, ss2)
nodesize = np.array([d['freq'] for n, d in g.nodes(data=True)])
nodecolor = np.array([d['fscore'] for n, d in g.nodes(data=True)])
nodecolor = (nodecolor - nodecolor.min() + 1) / (nodecolor.max() - nodecolor.min() + 1)
freq = {n:d['freq'] for n, d in g.nodes(data=True)}
layout = 'twopi'  # assumption: `layout` was referenced but never defined in the original; any graphviz prog from the list below works
pos = nx.nx_pydot.graphviz_layout(g, prog=layout, root=max(list(freq.keys()), key=freq.get))
#pos = spring_layout(g)
#pos = spectral_layout(g)
#layouts = ['twopi', 'fdp', 'circo', 'neato', 'dot', 'spring', 'spectral']
#pos = nx.graphviz_layout(g, prog=layout)
plt.clf()
figh = plt.gcf()
axh = figh.add_axes([0.04, 0.04, 0.92, 0.92])
axh.axis('off')
figh.set_facecolor('white')
#nx.draw_networkx_edges(g,pos,alpha=0.5,width=sznorm(edgewidth,mn=0.5,mx=10), edge_color='k')
#nx.draw_networkx_nodes(g,pos,node_size=sznorm(nodesize,mn=500,mx=5000),node_color=nodecolors,alpha=1)
for e in g.edges_iter():
x1, y1=pos[e[0]]
x2, y2=pos[e[1]]
props = dict(color='black', alpha=0.4, zorder=1)
plt.plot([x1, x2], [y1, y2], '-', lw=2, **props)
plt.scatter(x=[pos[s][0] for s in g.nodes()],
y=[pos[s][1] for s in g.nodes()],
s=_szscale(nodesize, mn=20, mx=200), #Units for scatter is (size in points)**2
c=nodecolor,
alpha=1, zorder=2, cmap=cmap)
for n, d in g.nodes(data=True):
if d['freq'] >= 0:
plt.annotate(n,
xy=pos[n],
fontname='Arial',
size=10,
weight='bold',
color='black',
va='center',
ha='center')
def swarmBox(data, x, y, hue, palette=None, order=None, hue_order=None, connect=False):
"""Depends on plot order of the swarm plot which does not seem dependable at the moment.
Better idea would be to adopt code from the actual swarm function for this, adding boxplots separately"""
if palette is None:
palette = sns.color_palette('Set2', n_colors=data[hue].unique().shape[0])
if hue_order is None:
hue_order = sorted(data[hue].unique())
if order is None:
order = sorted(data[x].unique())
params = dict(data=data, x=x, y=y, hue=hue, palette=palette, order=order, hue_order=hue_order)
sns.boxplot(**params, fliersize=0, linewidth=0.5)
swarm = sns.swarmplot(**params, linewidth=0.5, edgecolor='black', dodge=True)
if connect:
zipper = [order] + [swarm.collections[i::len(hue_order)] for i in range(len(hue_order))]
for z in zip(*zipper):
curx = z[0]
collections = z[1:]
offsets = []
for c,h in zip(collections, hue_order):
ind = (data[x] == curx) & (data[hue] == h)
sortii = np.argsort(np.argsort(data.loc[ind, y]))
offsets.append(c.get_offsets()[sortii,:])
for zoffsets in zip(*offsets):
xvec = [o[0] for o in zoffsets]
yvec = [o[1] for o in zoffsets]
plt.plot(xvec, yvec, '-', color='gray', linewidth=0.5)
plt.legend([plt.Circle(1, color=c) for c in palette], hue_order, title=hue)
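# Usage sketch (column names and data are hypothetical):
#
#     df = pd.DataFrame({'visit': ['V1'] * 6 + ['V2'] * 6,
#                        'arm': ['placebo', 'vaccine'] * 6,
#                        'response': np.random.rand(12)})
#     swarmBox(df, x='visit', y='response', hue='arm',
#              order=['V1', 'V2'], hue_order=['placebo', 'vaccine'])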
| mit |
MostafaGazar/tensorflow | tensorflow/examples/skflow/iris.py | 25 | 1649 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
gibbons/scikit-rf | skrf/plotting.py | 3 | 19735 |
'''
.. module:: skrf.plotting
========================================
plotting (:mod:`skrf.plotting`)
========================================
This module provides general plotting functions.
Plots and Charts
------------------
.. autosummary::
:toctree: generated/
smith
plot_smith
plot_rectangular
plot_polar
plot_complex_rectangular
plot_complex_polar
Misc Functions
-----------------
.. autosummary::
:toctree: generated/
save_all_figs
add_markers_to_lines
legend_off
func_on_all_figs
scrape_legend
'''
import pylab as plb
import numpy as npy
from matplotlib.patches import Circle # for drawing smith chart
from matplotlib.pyplot import quiver
from matplotlib import rcParams
#from matplotlib.lines import Line2D # for drawing smith chart
def smith(smithR=1, chart_type = 'z', draw_labels = False, border=False,
ax=None, ref_imm = 1.0):
'''
plots the smith chart of a given radius
Parameters
-----------
smithR : number
radius of smith chart
chart_type : ['z','y']
Contour type. Possible values are
* *'z'* : lines of constant impedance
* *'y'* : lines of constant admittance
draw_labels : Boolean
annotate real and imaginary parts of impedance on the
chart (only if smithR=1)
border : Boolean
draw a rectangular border with axis ticks, around the perimeter
of the figure. Not used if draw_labels = True
ax : matplotlib.axes object
existing axes to draw smith chart on
ref_imm : number
Reference immittance for center of Smith chart. Only changes
labels, if printed.
'''
##TODO: fix this function so it doesn't suck
if ax is None:
ax1 = plb.gca()
else:
ax1 = ax
# contour holds matplotlib instances of: patches.Circle, and lines.Line2D, which
# are the contours on the smith chart
contour = []
# these are hard-coded on purpose,as they should always be present
rHeavyList = [0,1]
xHeavyList = [1,-1]
#TODO: fix this
# these could be dynamically coded in the future, but work good'nuff for now
if not draw_labels:
rLightList = plb.logspace(3,-5,9,base=.5)
xLightList = plb.hstack([plb.logspace(2,-5,8,base=.5), -1*plb.logspace(2,-5,8,base=.5)])
else:
rLightList = plb.array( [ 0.2, 0.5, 1.0, 2.0, 5.0 ] )
xLightList = plb.array( [ 0.2, 0.5, 1.0, 2.0 , 5.0, -0.2, -0.5, -1.0, -2.0, -5.0 ] )
# cheap way to make an ok-looking smith chart at radii larger than 1
if smithR > 1:
rMax = (1.+smithR)/(1.-smithR)
rLightList = plb.hstack([ plb.linspace(0,rMax,11) , rLightList ])
if chart_type == 'y':
y_flip_sign = -1
else:
y_flip_sign = 1
# loops through Light and Heavy lists and draws circles using patches
# for analysis of this see R.M. Weikles Microwave II notes (from uva)
for r in rLightList:
center = (r/(1.+r)*y_flip_sign,0 )
radius = 1./(1+r)
contour.append( Circle( center, radius, ec='grey',fc = 'none'))
for x in xLightList:
center = (1*y_flip_sign,1./x)
radius = 1./x
contour.append( Circle( center, radius, ec='grey',fc = 'none'))
for r in rHeavyList:
center = (r/(1.+r)*y_flip_sign,0 )
radius = 1./(1+r)
contour.append( Circle( center, radius, ec= 'black', fc = 'none'))
for x in xHeavyList:
center = (1*y_flip_sign,1./x)
radius = 1./x
contour.append( Circle( center, radius, ec='black',fc = 'none'))
# clipping circle
clipc = Circle( [0,0], smithR, ec='k',fc='None',visible=True)
ax1.add_patch( clipc)
#draw x and y axis
ax1.axhline(0, color='k', lw=.1, clip_path=clipc)
ax1.axvline(1*y_flip_sign, color='k', clip_path=clipc)
ax1.grid(0)
# Set axis limits by plotting white points so zooming works properly
ax1.plot(smithR*npy.array([-1.1, 1.1]), smithR*npy.array([-1.1, 1.1]), 'w.', markersize = 0)
ax1.axis('image') # Combination of 'equal' and 'tight'
if not border:
ax1.yaxis.set_ticks([])
ax1.xaxis.set_ticks([])
for loc, spine in ax1.spines.items():
spine.set_color('none')
if draw_labels:
#Clear axis
ax1.yaxis.set_ticks([])
ax1.xaxis.set_ticks([])
for loc, spine in ax1.spines.items():
spine.set_color('none')
# Make annotations only if the radius is 1
if smithR == 1:
#Make room for annotation
ax1.plot(npy.array([-1.25, 1.25]), npy.array([-1.1, 1.1]), 'w.', markersize = 0)
ax1.axis('image')
#Annotate real part
for value in rLightList:
# Set radius of real part's label; offset slightly left (Z
# chart, y_flip_sign == 1) or right (Y chart, y_flip_sign == -1)
# so label doesn't overlap chart's circles
rho = (value - 1)/(value + 1) - y_flip_sign*0.01
if y_flip_sign == 1:
halignstyle = "right"
else:
halignstyle = "left"
ax1.annotate(str(value*ref_imm), xy=(rho*smithR, 0.01),
xytext=(rho*smithR, 0.01), ha = halignstyle, va = "baseline")
#Annotate imaginary part
radialScaleFactor = 1.01 # Scale radius of label position by this
# factor. Making it >1 places the label
# outside the Smith chart's circle
for value in xLightList:
#Transforms from complex to cartesian
S = (1j*value - 1) / (1j*value + 1)
S *= smithR * radialScaleFactor
rhox = S.real
rhoy = S.imag * y_flip_sign
# Choose alignment anchor point based on label's value
if ((value == 1.0) or (value == -1.0)):
halignstyle = "center"
elif (rhox < 0.0):
halignstyle = "right"
else:
halignstyle = "left"
if (rhoy < 0):
valignstyle = "top"
else:
valignstyle = "bottom"
#Annotate value
ax1.annotate(str(value*ref_imm) + 'j', xy=(rhox, rhoy),
xytext=(rhox, rhoy), ha = halignstyle, va = valignstyle)
#Annotate 0 and inf
ax1.annotate('0.0', xy=(-1.02, 0), xytext=(-1.02, 0),
ha = "right", va = "center")
ax1.annotate('$\infty$', xy=(radialScaleFactor, 0), xytext=(radialScaleFactor, 0),
ha = "left", va = "center")
# loop though contours and draw them on the given axes
for currentContour in contour:
cc=ax1.add_patch(currentContour)
cc.set_clip_path(clipc)
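# Usage sketch: draw a unit-radius impedance Smith chart with labels on the current
# axes, then overlay a synthetic reflection-coefficient trace.
#
#     plb.figure()
#     smith(smithR=1, chart_type='z', draw_labels=True)
#     gamma = 0.5 * npy.exp(1j * npy.linspace(0, npy.pi, 101))
#     plb.plot(gamma.real, gamma.imag)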
def plot_rectangular(x, y, x_label=None, y_label=None, title=None,
show_legend=True, axis='tight', ax=None, *args, **kwargs):
'''
plots rectangular data and optionally label axes.
Parameters
------------
z : array-like, of complex data
data to plot
x_label : string
x-axis label
y_label : string
y-axis label
title : string
plot title
show_legend : Boolean
controls the drawing of the legend
ax : :class:`matplotlib.axes.AxesSubplot` object
axes to draw on
*args,**kwargs : passed to pylab.plot
'''
if ax is None:
ax = plb.gca()
my_plot = ax.plot(x, y, *args, **kwargs)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if title is not None:
ax.set_title(title)
if show_legend:
# only show legend if they provide a label
if 'label' in kwargs:
ax.legend()
if axis is not None:
ax.autoscale(True, 'x', True)
ax.autoscale(True, 'y', False)
if plb.isinteractive():
plb.draw()
return my_plot
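# Hedged usage sketch: plot a labelled sine trace with plot_rectangular;
# `npy`/`plb` are the module's numpy/pyplot aliases used above.
def _example_plot_rectangular():
    x = npy.linspace(0, 10, 101)
    plot_rectangular(x, npy.sin(x), x_label='time', y_label='amplitude',
                     title='demo', label='sin(x)')
    plb.show()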
def plot_polar(theta, r, x_label=None, y_label=None, title=None,
show_legend=True, axis_equal=False, ax=None, *args, **kwargs):
'''
    plots polar data on a polar plot and optionally labels the axes.
    Parameters
    ------------
    theta : array-like
        angular data to plot, in radians
    r : array-like
        radial data to plot
x_label : string
x-axis label
y_label : string
y-axis label
title : string
plot title
show_legend : Boolean
controls the drawing of the legend
ax : :class:`matplotlib.axes.AxesSubplot` object
axes to draw on
*args,**kwargs : passed to pylab.plot
See Also
----------
plot_rectangular : plots rectangular data
plot_complex_rectangular : plot complex data on complex plane
plot_polar : plot polar data
plot_complex_polar : plot complex data on polar plane
plot_smith : plot complex data on smith chart
'''
if ax is None:
ax = plb.gca(polar=True)
ax.plot(theta, r, *args, **kwargs)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if title is not None:
ax.set_title(title)
if show_legend:
# only show legend if they provide a label
if 'label' in kwargs:
ax.legend()
if axis_equal:
ax.axis('equal')
if plb.isinteractive():
plb.draw()
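# Hedged usage sketch: an Archimedean spiral drawn with plot_polar.
def _example_plot_polar():
    theta = npy.linspace(0, 4 * npy.pi, 201)
    plot_polar(theta, theta / (4 * npy.pi), title='spiral', label='r(theta)')
    plb.show()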
def plot_complex_rectangular(z, x_label='Real', y_label='Imag',
title='Complex Plane', show_legend=True, axis='equal', ax=None,
*args, **kwargs):
'''
plot complex data on the complex plane
Parameters
------------
z : array-like, of complex data
data to plot
x_label : string
x-axis label
y_label : string
y-axis label
title : string
plot title
show_legend : Boolean
controls the drawing of the legend
ax : :class:`matplotlib.axes.AxesSubplot` object
axes to draw on
*args,**kwargs : passed to pylab.plot
See Also
----------
plot_rectangular : plots rectangular data
plot_complex_rectangular : plot complex data on complex plane
plot_polar : plot polar data
plot_complex_polar : plot complex data on polar plane
plot_smith : plot complex data on smith chart
'''
x = npy.real(z)
y = npy.imag(z)
plot_rectangular(x=x, y=y, x_label=x_label, y_label=y_label,
title=title, show_legend=show_legend, axis=axis,
ax=ax, *args, **kwargs)
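# Hedged usage sketch: scatter complex samples on the complex plane.
def _example_plot_complex_rectangular():
    z = npy.linspace(0.1, 1, 50) * npy.exp(1j * npy.linspace(0, 2 * npy.pi, 50))
    plot_complex_rectangular(z, marker='o', linestyle='', label='samples')
    plb.show()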
def plot_complex_polar(z, x_label=None, y_label=None,
title=None, show_legend=True, axis_equal=False, ax=None,
*args, **kwargs):
'''
plot complex data in polar format.
Parameters
------------
z : array-like, of complex data
data to plot
x_label : string
x-axis label
y_label : string
y-axis label
title : string
plot title
show_legend : Boolean
controls the drawing of the legend
ax : :class:`matplotlib.axes.AxesSubplot` object
axes to draw on
*args,**kwargs : passed to pylab.plot
See Also
----------
plot_rectangular : plots rectangular data
plot_complex_rectangular : plot complex data on complex plane
plot_polar : plot polar data
plot_complex_polar : plot complex data on polar plane
plot_smith : plot complex data on smith chart
'''
theta = npy.angle(z)
r = npy.abs(z)
plot_polar(theta=theta, r=r, x_label=x_label, y_label=y_label,
title=title, show_legend=show_legend, axis_equal=axis_equal,
ax=ax, *args, **kwargs)
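# Hedged usage sketch: the same kind of complex data shown in polar form.
def _example_plot_complex_polar():
    z = npy.linspace(0.1, 1, 50) * npy.exp(1j * npy.linspace(0, 2 * npy.pi, 50))
    plot_complex_polar(z, label='samples')
    plb.show()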
def plot_smith(s, smith_r=1, chart_type='z', x_label='Real',
y_label='Imaginary', title='Complex Plane', show_legend=True,
axis='equal', ax=None, force_chart = False, *args, **kwargs):
'''
plot complex data on smith chart
Parameters
------------
s : complex array-like
reflection-coeffient-like data to plot
smith_r : number
radius of smith chart
chart_type : ['z','y']
Contour type for chart.
* *'z'* : lines of constant impedance
* *'y'* : lines of constant admittance
x_label : string
x-axis label
y_label : string
y-axis label
title : string
plot title
show_legend : Boolean
controls the drawing of the legend
    axis : string
        axis scaling mode; 'equal' gives equal increments on both axes
force_chart : Boolean
forces the re-drawing of smith chart
ax : :class:`matplotlib.axes.AxesSubplot` object
axes to draw on
*args,**kwargs : passed to pylab.plot
See Also
----------
plot_rectangular : plots rectangular data
plot_complex_rectangular : plot complex data on complex plane
plot_polar : plot polar data
plot_complex_polar : plot complex data on polar plane
plot_smith : plot complex data on smith chart
'''
if ax is None:
ax = plb.gca()
# test if smith chart is already drawn
if not force_chart:
if len(ax.patches) == 0:
smith(ax=ax, smithR = smith_r, chart_type=chart_type)
plot_complex_rectangular(s, x_label=x_label, y_label=y_label,
title=title, show_legend=show_legend, axis=axis,
ax=ax, *args, **kwargs)
ax.axis(smith_r*npy.array([-1.1, 1.1, -1.1, 1.1]))
if plb.isinteractive():
plb.draw()
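# Hedged usage sketch: reflection-coefficient-like data on a Smith chart; the
# chart background is drawn automatically on first use of the axes.
def _example_plot_smith():
    s = 0.5 * npy.exp(1j * npy.linspace(0, 2 * npy.pi, 101))
    plot_smith(s, smith_r=1, chart_type='z', label='|s|=0.5')
    plb.show()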
def subplot_params(ntwk, param='s', proj='db', size_per_port=4, newfig=True,
add_titles=True, keep_it_tight=True, subplot_kw={}, *args, **kw):
'''
    Plot all of a network's parameters individually on subplots
    Parameters
    --------------
    ntwk : Network
        network whose parameters are plotted; its ``nports``, ``port_tuples``
        and ``plot_<param>_<proj>`` methods are used
    param : string
        parameter to plot, 's' by default
    proj : string
        projection/format of the parameter, 'db' by default
    \*args, \*\*kw : passed to the individual plot methods
'''
if newfig:
f,axs= plb.subplots(ntwk.nports,ntwk.nports,
figsize =(size_per_port*ntwk.nports,
size_per_port*ntwk.nports ),
**subplot_kw)
else:
f = plb.gcf()
axs = npy.array(f.get_axes())
for ports,ax in zip(ntwk.port_tuples, axs.flatten()):
plot_func = ntwk.__getattribute__('plot_%s_%s'%(param, proj))
plot_func(m=ports[0], n=ports[1], ax=ax,*args, **kw)
if add_titles:
ax.set_title('%s%i%i'%(param.upper(),ports[0]+1, ports[1]+1))
if keep_it_tight:
plb.tight_layout()
return f,axs
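# Hedged usage sketch: `ntwk` is assumed to be an skrf Network exposing the
# plot_s_db-style helpers used above; one subplot per S-parameter is drawn.
def _example_subplot_params(ntwk):
    fig, axes = subplot_params(ntwk, param='s', proj='db', size_per_port=3)
    plb.show()
    return fig, axes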
def shade_bands(edges, y_range=None,cmap='prism', **kwargs):
'''
Shades frequency bands.
    when plotting data over a set of frequency bands it is nice to
    have each band visually separated from the others. Passing a
    transparency via the `alpha` kwarg is usually desirable.
Parameters
--------------
edges : array-like
x-values separating regions of a given shade
y_range : tuple
y-values to shade in
cmap : str
see matplotlib.cm or matplotlib.colormaps for acceptable values
    \*\*kwargs : keyword arguments
passed to `matplotlib.fill_between`
Examples
-----------
>>> rf.shade_bands([325,500,750,1100], alpha=.2)
'''
cmap = plb.cm.get_cmap(cmap)
    if y_range is None:
        y_range = plb.gca().get_ylim()
axis = plb.axis()
for k in range(len(edges)-1):
plb.fill_between(
[edges[k],edges[k+1]],
y_range[0], y_range[1],
color = cmap(1.0*k/len(edges)),
**kwargs)
plb.axis(axis)
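# Hedged usage sketch: shade three adjacent frequency bands behind a trace.
def _example_shade_bands():
    f = npy.linspace(300, 1100, 201)
    plb.plot(f, npy.sin(f / 50.))
    shade_bands([325, 500, 750, 1100], alpha=.2)
    plb.show()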
def save_all_figs(dir = './', format=None, replace_spaces = True, echo = True):
'''
Save all open Figures to disk.
Parameters
------------
dir : string
path to save figures into
format : None, or list of strings
the types of formats to save figures as. The elements of this
list are passed to :matplotlib:`savefig`. This is a list so that
        you can save each figure in multiple formats.
    replace_spaces : bool
        replace spaces in figure titles with underscores when building
        file names
    echo : bool
True prints filenames as they are saved
'''
if dir[-1] != '/':
dir = dir + '/'
for fignum in plb.get_fignums():
fileName = plb.figure(fignum).get_axes()[0].get_title()
if replace_spaces:
fileName = fileName.replace(' ','_')
if fileName == '':
fileName = 'unnamedPlot'
if format is None:
plb.savefig(dir+fileName)
if echo:
print((dir+fileName))
else:
for fmt in format:
plb.savefig(dir+fileName+'.'+fmt, format=fmt)
if echo:
print((dir+fileName+'.'+fmt))
saf = save_all_figs
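# Hedged usage sketch: save every open figure as both png and pdf; the target
# directory './figs/' is an assumption and must already exist.
def _example_save_all_figs():
    save_all_figs(dir='./figs/', format=['png', 'pdf'])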
def add_markers_to_lines(ax=None,marker_list=['o','D','s','+','x'], markevery=10):
'''
    adds markers to existing lines on a plot
    this is convenient if you already have a plot made, but then
need to add markers afterwards, so that it can be interpreted in
black and white. The markevery argument makes the markers less
frequent than the data, which is generally what you want.
Parameters
-----------
ax : matplotlib.Axes
axis which to add markers to, defaults to gca()
marker_list : list of marker characters
see matplotlib.plot help for possible marker characters
markevery : int
markevery number of points with a marker.
'''
if ax is None:
ax=plb.gca()
lines = ax.get_lines()
if len(lines) > len (marker_list ):
marker_list *= 3
[k[0].set_marker(k[1]) for k in zip(lines, marker_list)]
[line.set_markevery(markevery) for line in lines]
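# Hedged usage sketch: retrofit markers onto an existing multi-line plot so it
# remains readable in black and white.
def _example_add_markers():
    x = npy.linspace(0, 1, 100)
    for k in range(3):
        plb.plot(x, x ** (k + 1), label='x^%i' % (k + 1))
    add_markers_to_lines(markevery=10)
    plb.legend()
    plb.show()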
def legend_off(ax=None):
'''
turn off the legend for a given axes.
if no axes is given then it will use current axes.
Parameters
-----------
ax : matplotlib.Axes object
axes to operate on
'''
if ax is None:
plb.gca().legend_.set_visible(0)
else:
ax.legend_.set_visible(0)
def scrape_legend(n=None, ax=None):
'''
scrapes a legend with redundant labels
Given a legend of m entries of n groups, this will remove all but
every m/nth entry. This is used when you plot many lines representing
the same thing, and only want one label entry in the legend for the
whole ensemble of lines
'''
if ax is None:
ax = plb.gca()
handles, labels = ax.get_legend_handles_labels()
if n is None:
n =len ( set(labels))
if n>len(handles):
raise ValueError('number of entries is too large')
k_list = [int(k) for k in npy.linspace(0,len(handles)-1,n)]
ax.legend([handles[k] for k in k_list], [labels[k] for k in k_list])
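# Hedged usage sketch: eight lines share two labels; keep one legend entry per
# label instead of one per line.
def _example_scrape_legend():
    x = npy.linspace(0, 1, 50)
    for k in range(4):
        plb.plot(x, x + k, 'b-', label='family A')
        plb.plot(x, -x - k, 'r-', label='family B')
    scrape_legend(n=2)
    plb.show()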
def func_on_all_figs(func, *args, **kwargs):
'''
runs a function after making all open figures current.
useful if you need to change the properties of many open figures
at once, like turn off the grid.
Parameters
----------
func : function
function to call
    \*args, \*\*kwargs : passed to func
Examples
----------
>>> rf.func_on_all_figs(grid,alpha=.3)
'''
for fig_n in plb.get_fignums():
fig = plb.figure(fig_n)
for ax_n in fig.axes:
fig.add_axes(ax_n) # trick to make axes current
func(*args, **kwargs)
plb.draw()
foaf = func_on_all_figs
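# Hedged usage sketch: turn on a light grid on every open figure at once.
def _example_func_on_all_figs():
    func_on_all_figs(plb.grid, alpha=.3)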
def plot_vector(a, off=0+0j, *args, **kwargs):
'''
plot a 2d vector
'''
return quiver(off.real,off.imag,a.real,a.imag,scale_units ='xy',
angles='xy',scale=1, *args, **kwargs)
def colors():
return rcParams['axes.color_cycle']
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
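# Hedged sketch (not part of the original tests): exercise the reference
# perceptron above on a tiny linearly separable problem.
def _example_my_perceptron():
    X_toy = np.array([[2.0, 1.0], [1.5, 2.0], [-1.0, -2.0], [-2.0, -0.5]])
    y_toy = np.array([1, 1, -1, -1])
    clf = MyPerceptron(n_iter=5)
    clf.fit(X_toy, y_toy)
    return clf.predict(X_toy)  # array([ 1.,  1., -1., -1.])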
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| unlicense |
kelle/astropy | astropy/visualization/wcsaxes/frame.py | 4 | 7481 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import abc
from collections import OrderedDict
import numpy as np
from ...extern import six
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
__all__ = ['Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame']
class Spine(object):
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
def __init__(self, parent_axes, transform):
self.parent_axes = parent_axes
self.transform = transform
self.data = None
self.pixel = None
self.world = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = value
self._pixel = self.parent_axes.transData.transform(self._data)
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
            self._data = self.parent_axes.transData.inverted().transform(value)
self._pixel = value
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def world(self):
return self._world
@world.setter
def world(self, value):
if value is None:
self._data = None
self._pixel = None
self._world = None
else:
self._data = self.transform.transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._world = value
self._update_normal()
def _update_normal(self):
# Find angle normal to border and inwards, in display coordinate
dx = self.pixel[1:, 0] - self.pixel[:-1, 0]
dy = self.pixel[1:, 1] - self.pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
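# Hedged numeric check (not part of the original module) of the normal-angle
# convention above: for a spine running left-to-right along y=0, dx > 0 and
# dy = 0, so arctan2(dx, -dy) gives 90 deg, i.e. the inward normal points up.
def _example_normal_angle():
    pixel = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
    dx = pixel[1:, 0] - pixel[:-1, 0]
    dy = pixel[1:, 1] - pixel[:-1, 1]
    return np.degrees(np.arctan2(dx, -dy))  # array([ 90.,  90.])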
@six.add_metaclass(abc.ABCMeta)
class BaseFrame(OrderedDict):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
def __init__(self, parent_axes, transform, path=None):
super(BaseFrame, self).__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = 1
self._color = 'black'
self._path = path
for axis in self.spine_names:
self[axis] = Spine(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return 'lower' if ymin < ymax else 'upper'
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(self._path, transform=self.parent_axes.transData,
facecolor='white', edgecolor='white')
def draw(self, renderer):
for axis in self:
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
p = np.linspace(0., 1., data.shape[0])
p_new = np.linspace(0., 1., n_samples)
spines[axis] = Spine(self.parent_axes, self.transform)
spines[axis].data = np.array([np.interp(p_new, p, data[:, 0]),
np.interp(p_new, p, data[:, 1])]).transpose()
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : string
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
@abc.abstractmethod
def update_spines(self):
raise NotImplementedError("")
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = 'brtl'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self['b'].data = np.array(([xmin, ymin], [xmax, ymin]))
self['r'].data = np.array(([xmax, ymin], [xmax, ymax]))
self['t'].data = np.array(([xmax, ymax], [xmin, ymax]))
self['l'].data = np.array(([xmin, ymax], [xmin, ymin]))
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = 'chv'
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0., 2 * np.pi, 1000)
self['c'].data = np.array([xmid + dx * np.cos(theta),
ymid + dy * np.sin(theta)]).transpose()
self['h'].data = np.array([np.linspace(xmin, xmax, 1000),
np.repeat(ymid, 1000)]).transpose()
self['v'].data = np.array([np.repeat(xmid, 1000),
np.linspace(ymin, ymax, 1000)]).transpose()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle."""
self.update_spines()
vertices = self['c'].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn."""
axis = 'c'
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
| bsd-3-clause |
AutonomyLab/deep_intent | code/autoencoder_model/scripts/thesis_scripts/latentspace_rendec16.py | 1 | 24665 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hickle as hkl
import numpy as np
np.random.seed(9 ** 10)
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras import regularizers
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers.core import Activation
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import UpSampling3D
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.callbacks import LearningRateScheduler
from keras.layers.advanced_activations import LeakyReLU
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error as mse
from plot_results import plot_err_variation
from keras.layers import Input
from keras.models import Model
from config_latent import *
from sys import stdout
from keras.layers.core import Lambda
import tb_callback
import lrs_callback
import argparse
import cv2
import os
def encoder_model():
inputs = Input(shape=(int(VIDEO_LENGTH/2), 128, 208, 3))
# 10x128x128
conv_1 = Conv3D(filters=128,
strides=(1, 4, 4),
dilation_rate=(1, 1, 1),
kernel_size=(3, 11, 11),
padding='same')(inputs)
x = TimeDistributed(BatchNormalization())(conv_1)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_1 = TimeDistributed(Dropout(0.5))(x)
conv_2a = Conv3D(filters=64,
strides=(1, 1, 1),
dilation_rate=(2, 1, 1),
kernel_size=(2, 5, 5),
padding='same')(out_1)
x = TimeDistributed(BatchNormalization())(conv_2a)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_2a = TimeDistributed(Dropout(0.5))(x)
conv_2b = Conv3D(filters=64,
strides=(1, 1, 1),
dilation_rate=(2, 1, 1),
kernel_size=(2, 5, 5),
padding='same')(out_2a)
x = TimeDistributed(BatchNormalization())(conv_2b)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_2b = TimeDistributed(Dropout(0.5))(x)
conv_2c = TimeDistributed(Conv2D(filters=64,
kernel_size=(1, 1),
strides=(1, 1),
padding='same'))(out_1)
x = TimeDistributed(BatchNormalization())(conv_2c)
out_1_less = TimeDistributed(LeakyReLU(alpha=0.2))(x)
res_1 = add([out_1_less, out_2b])
# res_1 = LeakyReLU(alpha=0.2)(res_1)
conv_3 = Conv3D(filters=64,
strides=(1, 2, 2),
dilation_rate=(1, 1, 1),
kernel_size=(3, 5, 5),
padding='same')(res_1)
x = TimeDistributed(BatchNormalization())(conv_3)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_3 = TimeDistributed(Dropout(0.5))(x)
# 10x16x16
conv_4a = Conv3D(filters=64,
strides=(1, 1, 1),
dilation_rate=(2, 1, 1),
kernel_size=(2, 3, 3),
padding='same')(out_3)
x = TimeDistributed(BatchNormalization())(conv_4a)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_4a = TimeDistributed(Dropout(0.5))(x)
conv_4b = Conv3D(filters=64,
strides=(1, 1, 1),
dilation_rate=(2, 1, 1),
kernel_size=(2, 3, 3),
padding='same')(out_4a)
x = TimeDistributed(BatchNormalization())(conv_4b)
x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
out_4b = TimeDistributed(Dropout(0.5))(x)
z = add([out_3, out_4b])
# res_1 = LeakyReLU(alpha=0.2)(res_1)
model = Model(inputs=inputs, outputs=[z, res_1])
return model
def decoder_model():
inputs = Input(shape=(int(VIDEO_LENGTH/2), 16, 26, 64))
residual_input = Input(shape=(int(VIDEO_LENGTH/2), 32, 52, 64), name='res_input')
# Adjust residual input
def adjust_res(x):
pad = K.zeros_like(x[:, 1:])
res = x[:, 0:1]
return K.concatenate([res, pad], axis=1)
enc_input = Lambda(adjust_res)(residual_input)
# 10x16x16
convlstm_1 = ConvLSTM2D(filters=64,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
return_sequences=True,
recurrent_dropout=0.2)(inputs)
x = TimeDistributed(BatchNormalization())(convlstm_1)
out_1 = TimeDistributed(Activation('tanh'))(x)
convlstm_2 = ConvLSTM2D(filters=64,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
return_sequences=True,
recurrent_dropout=0.2)(out_1)
x = TimeDistributed(BatchNormalization())(convlstm_2)
out_2 = TimeDistributed(Activation('tanh'))(x)
res_1 = add([inputs, out_2])
res_1 = UpSampling3D(size=(1, 2, 2))(res_1)
# 10x32x32
convlstm_3a = ConvLSTM2D(filters=64,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
return_sequences=True,
recurrent_dropout=0.2)(res_1)
x = TimeDistributed(BatchNormalization())(convlstm_3a)
out_3a = TimeDistributed(Activation('tanh'))(x)
convlstm_3b = ConvLSTM2D(filters=64,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
return_sequences=True,
recurrent_dropout=0.2)(out_3a)
x = TimeDistributed(BatchNormalization())(convlstm_3b)
out_3b = TimeDistributed(Activation('tanh'))(x)
res_2 = add([res_1, out_3b, enc_input])
res_2 = UpSampling3D(size=(1, 2, 2))(res_2)
# 10x64x64
convlstm_4a = ConvLSTM2D(filters=16,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
return_sequences=True,
recurrent_dropout=0.2)(res_2)
x = TimeDistributed(BatchNormalization())(convlstm_4a)
out_4a = TimeDistributed(Activation('tanh'))(x)
convlstm_4b = ConvLSTM2D(filters=16,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
return_sequences=True,
recurrent_dropout=0.2)(out_4a)
x = TimeDistributed(BatchNormalization())(convlstm_4b)
out_4b = TimeDistributed(Activation('tanh'))(x)
conv_4c = TimeDistributed(Conv2D(filters=16,
kernel_size=(1, 1),
strides=(1, 1),
padding='same'))(res_2)
x = TimeDistributed(BatchNormalization())(conv_4c)
res_2_less = TimeDistributed(Activation('tanh'))(x)
res_3 = add([res_2_less, out_4b])
res_3 = UpSampling3D(size=(1, 2, 2))(res_3)
# 10x128x128
convlstm_5 = ConvLSTM2D(filters=3,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
return_sequences=True,
recurrent_dropout=0.2)(res_3)
predictions = TimeDistributed(Activation('tanh'))(convlstm_5)
model = Model(inputs=[inputs, residual_input], outputs=predictions)
return model
def set_trainability(model, trainable):
model.trainable = trainable
for layer in model.layers:
layer.trainable = trainable
def autoencoder_model(encoder, decoder):
# model = Sequential()
# model.add(encoder)
# model.add(decoder)
inputs = Input(shape=(int(VIDEO_LENGTH / 2), 128, 208, 3))
z, res = encoder(inputs)
future = decoder([z, res])
model = Model(inputs=inputs, outputs=future)
return model
def arrange_images(video_stack):
n_frames = video_stack.shape[0] * video_stack.shape[1]
frames = np.zeros((n_frames,) + video_stack.shape[2:], dtype=video_stack.dtype)
frame_index = 0
for i in range(video_stack.shape[0]):
for j in range(video_stack.shape[1]):
frames[frame_index] = video_stack[i, j]
frame_index += 1
img_height = video_stack.shape[2]
img_width = video_stack.shape[3]
width = img_width * video_stack.shape[1]
height = img_height * video_stack.shape[0]
shape = frames.shape[1:]
image = np.zeros((height, width, shape[2]), dtype=video_stack.dtype)
frame_number = 0
for i in range(video_stack.shape[0]):
for j in range(video_stack.shape[1]):
image[(i * img_height):((i + 1) * img_height), (j * img_width):((j + 1) * img_width)] = frames[frame_number]
frame_number = frame_number + 1
return image
def load_weights(weights_file, model):
model.load_weights(weights_file)
def run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS):
if PRINT_MODEL_SUMMARY:
print(encoder.summary())
print(decoder.summary())
print(autoencoder.summary())
# exit(0)
# Save model to file
if SAVE_MODEL:
print("Saving models to file...")
model_json = encoder.to_json()
with open(os.path.join(MODEL_DIR, "encoder.json"), "w") as json_file:
json_file.write(model_json)
model_json = decoder.to_json()
with open(os.path.join(MODEL_DIR, "decoder.json"), "w") as json_file:
json_file.write(model_json)
model_json = autoencoder.to_json()
with open(os.path.join(MODEL_DIR, "autoencoder.json"), "w") as json_file:
json_file.write(model_json)
if PLOT_MODEL:
plot_model(encoder, to_file=os.path.join(MODEL_DIR, 'encoder.png'), show_shapes=True)
plot_model(decoder, to_file=os.path.join(MODEL_DIR, 'decoder.png'), show_shapes=True)
plot_model(autoencoder, to_file=os.path.join(MODEL_DIR, 'autoencoder.png'), show_shapes=True)
if ENC_WEIGHTS != "None":
print("Pre-loading encoder with weights...")
load_weights(ENC_WEIGHTS, encoder)
if DEC_WEIGHTS != "None":
print("Pre-loading decoder with weights...")
load_weights(DEC_WEIGHTS, decoder)
def load_to_RAM(frames_source):
frames = np.zeros(shape=((len(frames_source),) + IMG_SIZE))
print("Decimating RAM!")
j = 1
for i in range(1, len(frames_source)):
filename = "frame_" + str(j) + ".png"
im_file = os.path.join(DATA_DIR, filename)
try:
frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
frames[i] = (frame.astype(np.float32) - 127.5) / 127.5
j = j + 1
except AttributeError as e:
print(im_file)
print(e)
return frames
def load_X_RAM(videos_list, index, frames):
X = []
for i in range(BATCH_SIZE):
start_index = videos_list[(index * BATCH_SIZE + i), 0]
end_index = videos_list[(index * BATCH_SIZE + i), -1]
X.append(frames[start_index:end_index + 1])
X = np.asarray(X)
return X
def load_X(videos_list, index, data_dir, img_size, batch_size=BATCH_SIZE):
X = np.zeros((batch_size, VIDEO_LENGTH,) + img_size)
for i in range(batch_size):
for j in range(VIDEO_LENGTH):
filename = "frame_" + str(videos_list[(index * batch_size + i), j]) + ".png"
im_file = os.path.join(data_dir, filename)
try:
frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
X[i, j] = (frame.astype(np.float32) - 127.5) / 127.5
except AttributeError as e:
print(im_file)
print(e)
return X
def get_video_lists(frames_source, stride):
# Build video progressions
videos_list = []
start_frame_index = 1
end_frame_index = VIDEO_LENGTH + 1
while (end_frame_index <= len(frames_source)):
frame_list = frames_source[start_frame_index:end_frame_index]
if (len(set(frame_list)) == 1):
videos_list.append(range(start_frame_index, end_frame_index))
start_frame_index = start_frame_index + stride
end_frame_index = end_frame_index + stride
else:
start_frame_index = end_frame_index - 1
end_frame_index = start_frame_index + VIDEO_LENGTH
videos_list = np.asarray(videos_list, dtype=np.int32)
return np.asarray(videos_list)
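# Hedged sketch (not part of the original script): with a toy frames_source
# holding a single recording label, get_video_lists returns overlapping index
# windows of length VIDEO_LENGTH (taken from config_latent) spaced by `stride`;
# windows that would span two different source labels are dropped by the
# set() check above.
def _example_video_lists():
    toy_source = ['drive_a'] * (VIDEO_LENGTH + 9)
    return get_video_lists(frames_source=toy_source, stride=4)  # 3 windows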
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS):
print("Loading data definitions...")
frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_208.hkl'))
videos_list = get_video_lists(frames_source=frames_source, stride=4)
n_videos = videos_list.shape[0]
# Setup test
val_frames_source = hkl.load(os.path.join(VAL_DATA_DIR, 'sources_val_208.hkl'))
val_videos_list = get_video_lists(frames_source=val_frames_source, stride=(int(VIDEO_LENGTH / 2)))
n_val_videos = val_videos_list.shape[0]
if RAM_DECIMATE:
frames = load_to_RAM(frames_source=frames_source)
if SHUFFLE:
# Shuffle images to aid generalization
videos_list = np.random.permutation(videos_list)
# Build the Spatio-temporal Autoencoder
print("Creating models...")
encoder = encoder_model()
decoder = decoder_model()
autoencoder = autoencoder_model(encoder, decoder)
autoencoder.compile(loss="mean_squared_error", optimizer=OPTIM_A)
run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
NB_ITERATIONS = int(n_videos / BATCH_SIZE)
# NB_ITERATIONS = 5
NB_VAL_ITERATIONS = int(n_val_videos / BATCH_SIZE)
# Setup TensorBoard Callback
TC = tb_callback.TensorBoard(log_dir=TF_LOG_DIR, histogram_freq=0, write_graph=False, write_images=False)
LRS = lrs_callback.LearningRateScheduler(schedule=schedule)
LRS.set_model(autoencoder)
print("Beginning Training...")
# Begin Training
for epoch in range(1, NB_EPOCHS_AUTOENCODER+1):
if epoch == 21:
autoencoder.compile(loss="mean_absolute_error", optimizer=OPTIM_B)
load_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_20.h5'), encoder)
load_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_20.h5'), decoder)
print("\n\nEpoch ", epoch)
loss = []
val_loss = []
# Set learning rate every epoch
LRS.on_epoch_begin(epoch=epoch)
lr = K.get_value(autoencoder.optimizer.lr)
print("Learning rate: " + str(lr))
for index in range(NB_ITERATIONS):
# Train Autoencoder
if RAM_DECIMATE:
X = load_X_RAM(videos_list, index, frames)
else:
X = load_X(videos_list, index, DATA_DIR, IMG_SIZE)
X_train = np.flip(X[:, 0: int(VIDEO_LENGTH / 2)], axis=1)
y_train = X[:, int(VIDEO_LENGTH / 2):]
loss.append(autoencoder.train_on_batch(X_train, y_train))
arrow = int(index / (NB_ITERATIONS / 40))
stdout.write("\rIter: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
"loss: " + str(loss[len(loss) - 1]) +
"\t [" + "{0}>".format("=" * (arrow)))
stdout.flush()
if SAVE_GENERATED_IMAGES:
# Save generated images to file
predicted_images = autoencoder.predict(X_train, verbose=0)
voila = np.concatenate((X_train, y_train), axis=1)
truth_seq = arrange_images(voila)
pred_seq = arrange_images(np.concatenate((X_train, predicted_images), axis=1))
truth_seq = truth_seq * 127.5 + 127.5
pred_seq = pred_seq * 127.5 + 127.5
if epoch == 1:
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_truth.png"), truth_seq)
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_pred.png"), pred_seq)
# Run over test data
print('')
for index in range(NB_VAL_ITERATIONS):
X = load_X(val_videos_list, index, VAL_DATA_DIR, IMG_SIZE)
X_val = np.flip(X[:, 0: int(VIDEO_LENGTH / 2)], axis=1)
y_val = X[:, int(VIDEO_LENGTH / 2):]
val_loss.append(autoencoder.test_on_batch(X_val, y_val))
arrow = int(index / (NB_VAL_ITERATIONS / 40))
stdout.write("\rIter: " + str(index) + "/" + str(NB_VAL_ITERATIONS - 1) + " " +
"val_loss: " + str(val_loss[len(val_loss) - 1]) +
"\t [" + "{0}>".format("=" * (arrow)))
stdout.flush()
# then after each epoch/iteration
avg_loss = sum(loss) / len(loss)
avg_val_loss = sum(val_loss) / len(val_loss)
logs = {'loss': avg_loss, 'val_loss': avg_val_loss}
TC.on_epoch_end(epoch, logs)
# Log the losses
with open(os.path.join(LOG_DIR, 'losses_gen.json'), 'a') as log_file:
log_file.write("{\"epoch\":%d, \"train_loss\":%f, \"val_loss\":%f}\n" % (epoch, avg_loss, avg_val_loss))
print("\nAvg train loss: " + str(avg_loss) + " Avg val loss: " + str(avg_val_loss))
# Save model weights per epoch to file
if epoch > 15 and epoch < 21:
encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_' + str(epoch) + '.h5'), True)
decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
if epoch > 25:
encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_' + str(epoch) + '.h5'), True)
decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
# encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_' + str(epoch) + '.h5'), True)
# decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
# test(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_' + str(epoch) + '.h5'),
# os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'))
def test(ENC_WEIGHTS, DEC_WEIGHTS):
print('')
# Setup test
test_frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_208.hkl'))
test_videos_list = get_video_lists(frames_source=test_frames_source, stride=(int(VIDEO_LENGTH / 2)))
n_test_videos = test_videos_list.shape[0]
if not os.path.exists(TEST_RESULTS_DIR + '/truth/'):
os.mkdir(TEST_RESULTS_DIR + '/truth/')
if not os.path.exists(TEST_RESULTS_DIR + '/pred/'):
os.mkdir(TEST_RESULTS_DIR + '/pred/')
if not os.path.exists(TEST_RESULTS_DIR + '/graphs/'):
os.mkdir(TEST_RESULTS_DIR + '/graphs/')
os.mkdir(TEST_RESULTS_DIR + '/graphs/values/')
print("Creating models...")
encoder = encoder_model()
decoder = decoder_model()
autoencoder = autoencoder_model(encoder, decoder)
autoencoder.compile(loss="mean_absolute_error", optimizer=OPTIM_B)
    # i = 0
# for layer in decoder.layers:
# print(layer, i)
# i = i + 1
# Build first decoder layer output
# intermediate_decoder = Model(inputs=decoder.layers[0].input, outputs=decoder.layers[7].output)
# print (intermediate_decoder.input_shape)
# inputs = Input(shape=(int(VIDEO_LENGTH / 2), 128, 208, 3))
# z_rep, res_rep = encoder(inputs)
# future = intermediate_decoder(z_rep)
# z_model = Model(inputs=inputs, outputs=future)
# z_model.compile(loss='mean_absolute_error', optimizer=OPTIM_B)
run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
NB_TEST_ITERATIONS = int(n_test_videos / TEST_BATCH_SIZE)
test_loss = []
mae_errors = np.zeros(shape=(n_test_videos, int(VIDEO_LENGTH/2) + 1))
mse_errors = np.zeros(shape=(n_test_videos, int(VIDEO_LENGTH/2) + 1))
z_all = []
    # Evaluate only two hand-picked test sequences (this overrides the full
    # iteration count computed above)
    test_indices = [298, 341]
    for index in test_indices:
X = load_X(test_videos_list, index, TEST_DATA_DIR, IMG_SIZE, batch_size=TEST_BATCH_SIZE)
X_test = np.flip(X[:, 0: int(VIDEO_LENGTH / 2)], axis=1)
y_test = X[:, int(VIDEO_LENGTH / 2):]
test_loss.append(autoencoder.test_on_batch(X_test, y_test))
# arrow = int(index / (NB_TEST_ITERATIONS / 40))
stdout.write("\rIter: " + str(index) + "/" + " " +
"test_loss: " + str(test_loss[len(test_loss) - 1]))
stdout.flush()
if SAVE_GENERATED_IMAGES:
# Save generated images to file
z, res = encoder.predict(X_test, verbose=0)
# z = z_model.predict(X_test, verbose=0)
# z_all.append(z)
z_new = np.zeros(shape=(TEST_BATCH_SIZE, 1, 16, 26, 64))
z_new[0] = z[:, 16]
z_new = np.repeat(z_new, int(VIDEO_LENGTH/2), axis=1)
predicted_images = decoder.predict([z_new, res], verbose=0)
voila = np.concatenate((X_test, y_test), axis=1)
truth_seq = arrange_images(voila)
pred_seq = arrange_images(np.concatenate((X_test, predicted_images), axis=1))
truth_seq = truth_seq * 127.5 + 127.5
pred_seq = pred_seq * 127.5 + 127.5
# mae_error = []
# mse_error = []
# for i in range(int(VIDEO_LENGTH / 2)):
# mae_errors[index, i] = (mae(y_test[0, i].flatten(), predicted_images[0, i].flatten()))
# mae_error.append(mae_errors[index, i])
#
#
# mse_errors[index, i] = (mse(y_test[0, i].flatten(), predicted_images[0, i].flatten()))
# mse_error.append(mse_errors[index, i])
#
# dc_mae = mae(X_test[0, 0].flatten(), y_test[0, 0].flatten())
# mae_errors[index, -1] = dc_mae
# dc_mse = mse(X_test[0, 0].flatten(), y_test[0, 0].flatten())
# mse_errors[index, -1] = dc_mse
cv2.imwrite(os.path.join(TEST_RESULTS_DIR + '/truth/', str(index) + "_truth.png"), truth_seq)
cv2.imwrite(os.path.join(TEST_RESULTS_DIR + '/pred/', str(index) + "_pred.png"), pred_seq)
# plot_err_variation(mae_error, index, dc_mae, 'mae')
# plot_err_variation(mse_error, index, dc_mse, 'mse')
np.save(os.path.join(TEST_RESULTS_DIR + '/graphs/values/', str(index) + "_mae.npy"), np.asarray(mae_errors))
np.save(os.path.join(TEST_RESULTS_DIR + '/graphs/values/', str(index) + "_mse.npy"), np.asarray(mse_errors))
np.save(os.path.join(TEST_RESULTS_DIR + '/graphs/values/', "z_all.npy"), np.asarray(z_all))
# then after each epoch/iteration
avg_test_loss = sum(test_loss) / len(test_loss)
np.save(TEST_RESULTS_DIR + 'test_loss.npy', np.asarray(test_loss))
print("\nAvg loss: " + str(avg_test_loss))
print("\n Std: " + str(np.std(np.asarray(test_loss))))
print("\n Variance: " + str(np.var(np.asarray(test_loss))))
print("\n Mean: " + str(np.mean(np.asarray(test_loss))))
print("\n Max: " + str(np.max(np.asarray(test_loss))))
print("\n Min: " + str(np.min(np.asarray(test_loss))))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str)
parser.add_argument("--enc_weights", type=str, default="None")
parser.add_argument("--dec_weights", type=str, default="None")
parser.add_argument("--gen_weights", type=str, default="None")
parser.add_argument("--dis_weights", type=str, default="None")
parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
parser.add_argument("--nice", dest="nice", action="store_true")
parser.set_defaults(nice=False)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
if args.mode == "train":
train(BATCH_SIZE=args.batch_size,
ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights)
if args.mode == "test":
test(ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights)
# if args.mode == "test_ind":
# test_ind(ENC_WEIGHTS=args.enc_weights,
# DEC_WEIGHTS=args.dec_weights)
| bsd-3-clause |
meduz/scikit-learn | examples/missing_values.py | 71 | 3055 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
ephes/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
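# Hedged usage sketch (not part of this module): fit RandomizedLasso on a toy
# regression problem and read off stability scores and selected features.
def _example_randomized_lasso():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=20, n_informative=5,
                           random_state=0)
    rl = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
    rl.fit(X, y)
    return rl.scores_, rl.get_support(indices=True)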
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
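# A minimal usage sketch (illustrative only): run the stability path on a
# synthetic sparse regression problem. The data generator and parameter values
# below are assumptions, not part of this module.
def _lasso_stability_path_example():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=20, n_informative=3,
                           noise=1.0, random_state=0)
    alpha_grid, scores_path = lasso_stability_path(X, y, n_resampling=50,
                                                   random_state=0)
    # scores_path has shape (n_features, len(alpha_grid)); features whose
    # score stays high along the whole path are the stable ones.
    return alpha_grid, scores_path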
| bsd-3-clause |
appapantula/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
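# How to run this exercise (paths are illustrative): download the movie review
# polarity dataset and arrange it with one sub-folder per class, which is the
# layout expected by load_files (e.g. txt_sentoken/pos and txt_sentoken/neg),
# then call:
#
#     python exercise_02_sentiment.py ./txt_sentoken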
| bsd-3-clause |
bthirion/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 5 | 3875 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for Bayesian Ridge Regression
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights, histogram of the weights, and
# predictions with standard deviations
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True,
edgecolor='black')
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=0.1)
clf_poly = BayesianRidge()
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial Bayesian Ridge Regression", linewidth=lw)
plt.plot(X_plot, y_plot, color='gold', linewidth=lw,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
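# Quick numeric check (illustrative): compare the estimated noise precision
# alpha_ and weight precision lambda_ of the first model with the values used
# to simulate the data above.
print("Estimated alpha_ (noise precision): %.2f (simulated with %.2f)"
      % (clf.alpha_, alpha_))
print("Estimated lambda_ (weight precision): %.2f (simulated with %.2f)"
      % (clf.lambda_, lambda_))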
| bsd-3-clause |
dcherian/tools | ROMS/pmacc/tools/post_tools/rompy/trunk/make_standard_images.py | 1 | 9610 | #!/usr/bin/env python
import os
import glob
from optparse import OptionParser
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from rompy import rompy, plot_utils, utils, extract_utils
def surface_map(file,img_file=None,varname='salt',clim=None):
(data, coords) = rompy.extract(file,varname=varname,extraction_type='surface')
# plot_utils.plot_surface(coords['xm'],coords['ym'],data)
title = '%s %s %s %s' % ( extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt) )
plot_utils.plot_map(coords['xm'],coords['ym'],data,filename=img_file, clim=clim, title=title, caxis_label=clabel_map[varname])
def main_basin_curtain(file,img_file,varname,n=4,clim=None): # Main Basin
if varname == 'U':
main_basin_U_curtain(file,img_file,n,clim)
else:
x,y = utils.high_res_main_basin_xy(n=n)
(data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y)
title = '%s %s Main Basin %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(coords=coords, data=data, varname=varname, region='Main Basin', filename=img_file, n=n, x_axis_offset=utils.offset_region(coords), clim=clim,cmap='banas_hsv_cm',labeled_contour_gap=2, title=title, caxis_label=clabel_map[varname])
def hood_canal_curtain(file,img_file,varname,n=1,clim=None): # Hood Canal
if varname == 'U':
hood_canal_U_curtain(file,img_file,n,clim)
else:
x,y = utils.high_res_hood_canal_xy(n=n)
(data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y)
title = '%s %s Hood Canal %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(coords=coords, data=data, varname=varname, region='Hood Canal', filename=img_file, n=n, x_axis_offset=utils.offset_region(coords), clim=clim, cmap='banas_hsv_cm',labeled_contour_gap=2, title=title, caxis_label=clabel_map[varname])
def hood_canal_U_curtain(file,img_file,n=1,clim=None): # velocity in Hood Canal
x,y = utils.high_res_hood_canal_xy(n=n)
(u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y)
(v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y)
data = np.zeros(u.shape)
for i in range(u.shape[1]):
if i == u.shape[1]-1:
x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]])
else:
x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]])
for j in range(u.shape[0]):
u_vec = np.array([u[j,i], v[j,i]])
data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec)))
data = np.ma.array(data, mask=np.abs(data) > 100)
title = '%s %s Hood Canal %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt))
hood_U_clim = (np.array(clim)/2.0).tolist()
plot_utils.plot_parker(coords=coords,data=data,varname='U', region='Hood Canal', filename=img_file, n=n, clim=clim, x_axis_offset=utils.offset_region(coords), cmap='red_blue', title=title, caxis_label=clabel_map['U'])
def main_basin_U_curtain(file,img_file,n=1,clim=None): # velocity in Main Basin
x,y = utils.high_res_main_basin_xy(n=n)
(u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y)
(v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y)
data = np.zeros(u.shape)
for i in range(u.shape[1]):
if i == u.shape[1]-1:
x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]])
else:
x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]])
for j in range(u.shape[0]):
u_vec = np.array([u[j,i], v[j,i]])
data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec)))
data = np.ma.array(data, mask=np.abs(data) > 100)
title = '%s %s Main Basin %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(coords=coords,data=data,varname='U', region=' Main Basin', filename=img_file, n=n, clim=clim, x_axis_offset=utils.offset_region(coords),cmap='red_blue', title=title, caxis_label=clabel_map['U'])
def daves_curtain(file,img_file,section,varname,clim=None):
if varname == 'U':
daves_U_curtain(file,img_file,section,varname,clim)
else:
x = utils.get_daves_section_var(section=section,var='lon')
y = utils.get_daves_section_var(section=section,var='lat')
(data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y)
title = '%s %s %s %s %s' % (extract_utils.run_title(file), os.path.basename(file), section, var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(
coords=coords,
data=data,
filename=img_file,
title=title,
x_axis_offset=utils.offset_region(coords),
clim=clim,
cmap='banas_hsv_cm',
labeled_contour_gap=2,
caxis_label=clabel_map[varname],
inset=inset_dict[section],
ctd_ind=ctd_ind_dict[section],
label=utils.get_daves_section_var(section=section,var='label'),
label_ind=utils.get_daves_section_var(section=section,var='label_ind')
)
return
def daves_U_curtain(file,img_file,section,varname,clim):
x = utils.get_daves_section_var(section=section,var='lon')
y = utils.get_daves_section_var(section=section,var='lat')
(u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y)
(v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y)
data = np.zeros(u.shape)
for i in range(u.shape[1]):
if i == u.shape[1]-1:
x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]])
else:
x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]])
for j in range(u.shape[0]):
u_vec = np.array([u[j,i], v[j,i]])
data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec)))
data = np.ma.array(data, mask=np.abs(data) > 100)
title = '%s %s %s %s %s' % (extract_utils.run_title(file), os.path.basename(file), section, var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(
coords=coords,
data=data,
filename=img_file,
title=title,
x_axis_offset=utils.offset_region(coords),
clim=clim,
cmap='red_blue',
caxis_label=clabel_map[varname],
inset=inset_dict[section],
ctd_ind=ctd_ind_dict[section],
label=utils.get_daves_section_var(section=section,var='label'),
label_ind=utils.get_daves_section_var(section=section,var='label_ind')
)
return
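# The three *_U_curtain functions above repeat the same projection of (u, v)
# onto the local section direction; the helper below is a minimal sketch of
# that step (illustrative only, it is not called anywhere in this script).
def _along_track_velocity(u, v, x, y):
    data = np.zeros(u.shape)
    for i in range(u.shape[1]):
        if i == u.shape[1] - 1:
            x_vec = np.array([x[i] - x[i - 1], y[i] - y[i - 1]])
        else:
            x_vec = np.array([x[i + 1] - x[i], y[i + 1] - y[i]])
        for j in range(u.shape[0]):
            u_vec = np.array([u[j, i], v[j, i]])
            data[j, i] = np.dot(x_vec, u_vec) / np.sqrt(np.dot(x_vec, x_vec))
    return np.ma.array(data, mask=np.abs(data) > 100)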
# begin actual code that runs.
parser = OptionParser()
parser.add_option('-i', '--img_dir',
dest='img_dir',
default='./image_sequence',
help='Location to save images. Default is ./image_sequnce')
(options, args) = parser.parse_args()
if args == []:
fl = glob.glob('ocean_his*.nc')
print(fl)
file_list = [fl[0]]
else:
file_list = args
img_dir = options.img_dir
var_list = ['salt','temp','U']
var_title_map = {'salt':'Salinity','temp':'Temperature','U':'Velocity'}
title_time_fmt = '%Y-%m-%d %H:%M UTC'
clims = {'salt':[0, 21,33, 33], 'temp': [8, 20], 'U':[-2,2]}
clabel_map = {'temp': u'\u00B0 C', 'salt': 'psu', 'U': 'm/s'}
inset_dict = {'AI_HC':'Puget Sound','AI_WB':'Puget Sound','JdF_SoG':'Strait of Georgia','JdF_SS':'Puget Sound','JdF_PS':'JdF_PS'}
ctd_ind_dict = {
'AI_HC':utils.get_daves_section_var(section='AI_HC',var='PRISM_ctd_ind'),
'AI_WB':utils.get_daves_section_var(section='AI_WB',var='PRISM_ctd_ind'),
'JdF_SoG':utils.get_daves_section_var(section='JdF_SoG',var='IOS_ctd_ind'),
'JdF_SS':utils.get_daves_section_var(section='JdF_SS',var='PRISM_ctd_ind'),
'JdF_PS':utils.get_daves_section_var(section='JdF_PS',var='PRISM_ctd_ind')
}
ctd_ind_dict['JdF_SoG'].extend(utils.get_daves_section_var(section='JdF_SoG',var='JEMS_ctd_ind'))
for file in file_list:
ncf_index = os.path.basename(file)[:-3]
print('%s' %ncf_index)
if not os.path.exists(img_dir):
os.makedirs(img_dir)
for var in var_list:
hood_img_file = '%s/%s_hood_%s.png' %(img_dir, ncf_index,var)
main_img_file = '%s/%s_main_%s.png' %(img_dir, ncf_index,var)
surface_img_file = '%s/%s_surface_%s.png' % (img_dir, ncf_index, var)
AI_HC_img_file = '%s/AI_HC_%s_%s.png' %(img_dir,var,ncf_index)
AI_WB_img_file = '%s/AI_WB_%s_%s.png' %(img_dir,var,ncf_index)
JdF_SoG_img_file = '%s/JdF_SoG_%s_%s.png' %(img_dir,var,ncf_index)
JdF_SS_img_file = '%s/JdF_SS_%s_%s.png' %(img_dir,var,ncf_index)
JdF_PS_img_file = '%s/JdF_PS_%s_%s.png' %(img_dir,var,ncf_index)
print('making hood canal %s' % var)
hood_canal_curtain(file, hood_img_file, var, n=8, clim=clims[var])
print('making main basin %s' % var)
main_basin_curtain(file, main_img_file, var, n=8, clim=clims[var])
print('making AI_HC %s' % var)
daves_curtain(file,AI_HC_img_file,section='AI_HC',varname=var,clim=clims[var])
print('making AI_WB %s' % var)
daves_curtain(file,AI_WB_img_file,section='AI_WB',varname=var,clim=clims[var])
print('making JdF_SoG %s' % var)
daves_curtain(file,JdF_SoG_img_file,section='JdF_SoG',varname=var,clim=clims[var])
print('making JdF_SS %s' % var)
daves_curtain(file,JdF_SS_img_file,section='JdF_SS',varname=var,clim=clims[var])
print('making JdF_PS %s' % var)
daves_curtain(file,JdF_PS_img_file,section='JdF_PS',varname=var,clim=clims[var])
if not var == 'U':
print('making surface %s' % var)
surface_map(file,surface_img_file,var,clim=clims[var])
| mit |
joerocklin/gem5 | util/stats/barchart.py | 90 | 12472 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
import matplotlib, pylab
from matplotlib.font_manager import FontProperties
from matplotlib.numerix import array, arange, reshape, shape, transpose, zeros
from matplotlib.numerix import Float
from matplotlib.ticker import NullLocator
matplotlib.interactive(False)
from chart import ChartOptions
class BarChart(ChartOptions):
def __init__(self, default=None, **kwargs):
super(BarChart, self).__init__(default, **kwargs)
self.inputdata = None
self.chartdata = None
self.inputerr = None
self.charterr = None
def gen_colors(self, count):
cmap = matplotlib.cm.get_cmap(self.colormap)
if count == 1:
return cmap([ 0.5 ])
if count < 5:
return cmap(arange(5) / float(4))[:count]
return cmap(arange(count) / float(count - 1))
# The input data format does not match the data format that the
# graph function takes, because the input format is meant to be
# intuitive. The conversion from input data format to chart data
# format depends on the dimensionality of the input data. Check here
# for the dimensionality and correctness of the input data
def set_data(self, data):
if data is None:
self.inputdata = None
self.chartdata = None
return
data = array(data)
dim = len(shape(data))
if dim not in (1, 2, 3):
raise AttributeError, "Input data must be a 1, 2, or 3d matrix"
self.inputdata = data
# If the input data is a 1d matrix, then it describes a
# standard bar chart.
if dim == 1:
self.chartdata = array([[data]])
# If the input data is a 2d matrix, then it describes a bar
# chart with groups. The matrix being an array of groups of
# bars.
if dim == 2:
self.chartdata = transpose([data], axes=(2,0,1))
# If the input data is a 3d matrix, then it describes an array
# of groups of bars with each bar being an array of stacked
# values.
if dim == 3:
self.chartdata = transpose(data, axes=(1,2,0))
def get_data(self):
return self.inputdata
data = property(get_data, set_data)
def set_err(self, err):
if err is None:
self.inputerr = None
self.charterr = None
return
err = array(err)
dim = len(shape(err))
if dim not in (1, 2, 3):
raise AttributeError, "Input err must be a 1, 2, or 3d matrix"
self.inputerr = err
if dim == 1:
self.charterr = array([[err]])
if dim == 2:
self.charterr = transpose([err], axes=(2,0,1))
if dim == 3:
self.charterr = transpose(err, axes=(1,2,0))
def get_err(self):
return self.inputerr
err = property(get_err, set_err)
# Graph the chart data.
# Input is a 3d matrix that describes a plot that has multiple
# groups, multiple bars in each group, and multiple values stacked
# in each bar. The underlying bar() function expects a sequence of
# bars in the same stack location and same group location, so the
# organization of the matrix is that the inner most sequence
# represents one of these bar groups, then those are grouped
# together to make one full stack of bars in each group, and then
# the outer most layer describes the groups. Here is an example
# data set and how it gets plotted as a result.
#
# e.g. data = [[[10,11,12], [13,14,15], [16,17,18], [19,20,21]],
# [[22,23,24], [25,26,27], [28,29,30], [31,32,33]]]
#
# will plot like this:
#
# 19 31 20 32 21 33
# 16 28 17 29 18 30
# 13 25 14 26 15 27
# 10 22 11 23 12 24
#
# Because this arrangement is rather counterintuitive, the rearrange
# function takes various matrices and arranges them to fit this
# profile.
#
# This code deals with one of the dimensions in the matrix being
# one wide.
#
def graph(self):
if self.chartdata is None:
raise AttributeError, "Data not set for bar chart!"
dim = len(shape(self.inputdata))
cshape = shape(self.chartdata)
if self.charterr is not None and shape(self.charterr) != cshape:
raise AttributeError, 'Dimensions of error and data do not match'
if dim == 1:
colors = self.gen_colors(cshape[2])
colors = [ [ colors ] * cshape[1] ] * cshape[0]
if dim == 2:
colors = self.gen_colors(cshape[0])
colors = [ [ [ c ] * cshape[2] ] * cshape[1] for c in colors ]
if dim == 3:
colors = self.gen_colors(cshape[1])
colors = [ [ [ c ] * cshape[2] for c in colors ] ] * cshape[0]
colors = array(colors)
self.figure = pylab.figure(figsize=self.chart_size)
outer_axes = None
inner_axes = None
if self.xsubticks is not None:
color = self.figure.get_facecolor()
self.metaaxes = self.figure.add_axes(self.figure_size,
axisbg=color, frameon=False)
for tick in self.metaaxes.xaxis.majorTicks:
tick.tick1On = False
tick.tick2On = False
self.metaaxes.set_yticklabels([])
self.metaaxes.set_yticks([])
size = [0] * 4
size[0] = self.figure_size[0]
size[1] = self.figure_size[1] + .12
size[2] = self.figure_size[2]
size[3] = self.figure_size[3] - .12
self.axes = self.figure.add_axes(size)
outer_axes = self.metaaxes
inner_axes = self.axes
else:
self.axes = self.figure.add_axes(self.figure_size)
outer_axes = self.axes
inner_axes = self.axes
bars_in_group = len(self.chartdata)
width = 1.0 / ( bars_in_group + 1)
center = width / 2
bars = []
for i,stackdata in enumerate(self.chartdata):
bottom = array([0.0] * len(stackdata[0]), Float)
stack = []
for j,bardata in enumerate(stackdata):
bardata = array(bardata)
ind = arange(len(bardata)) + i * width + center
yerr = None
if self.charterr is not None:
yerr = self.charterr[i][j]
bar = self.axes.bar(ind, bardata, width, bottom=bottom,
color=colors[i][j], yerr=yerr)
if self.xsubticks is not None:
self.metaaxes.bar(ind, [0] * len(bardata), width)
stack.append(bar)
bottom += bardata
bars.append(stack)
if self.xlabel is not None:
outer_axes.set_xlabel(self.xlabel)
if self.ylabel is not None:
inner_axes.set_ylabel(self.ylabel)
if self.yticks is not None:
ymin, ymax = self.axes.get_ylim()
nticks = float(len(self.yticks))
ticks = arange(nticks) / (nticks - 1) * (ymax - ymin) + ymin
inner_axes.set_yticks(ticks)
inner_axes.set_yticklabels(self.yticks)
elif self.ylim is not None:
inner_axes.set_ylim(self.ylim)
if self.xticks is not None:
outer_axes.set_xticks(arange(cshape[2]) + .5)
outer_axes.set_xticklabels(self.xticks)
if self.xsubticks is not None:
numticks = (cshape[0] + 1) * cshape[2]
inner_axes.set_xticks(arange(numticks) * width + 2 * center)
xsubticks = list(self.xsubticks) + [ '' ]
inner_axes.set_xticklabels(xsubticks * cshape[2], fontsize=7,
rotation=30)
if self.legend is not None:
if dim == 1:
lbars = bars[0][0]
if dim == 2:
lbars = [ bars[i][0][0] for i in xrange(len(bars))]
if dim == 3:
number = len(bars[0])
lbars = [ bars[0][number - j - 1][0] for j in xrange(number)]
if self.fig_legend:
self.figure.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
else:
self.axes.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
if self.title is not None:
self.axes.set_title(self.title)
def savefig(self, name):
self.figure.savefig(name)
def savecsv(self, name):
f = file(name, 'w')
data = array(self.inputdata)
dim = len(data.shape)
if dim == 1:
#if self.xlabel:
# f.write(', '.join(list(self.xlabel)) + '\n')
f.write(', '.join([ '%f' % val for val in data]) + '\n')
if dim == 2:
#if self.xlabel:
# f.write(', '.join([''] + list(self.xlabel)) + '\n')
for i,row in enumerate(data):
ylabel = []
#if self.ylabel:
# ylabel = [ self.ylabel[i] ]
f.write(', '.join(ylabel + [ '%f' % v for v in row]) + '\n')
if dim == 3:
f.write("don't do 3D csv files\n")
pass
f.close()
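# A minimal sketch of the input-to-chart conversion described above, using
# numpy directly (an assumption; this module itself uses matplotlib.numerix):
# a 2-D input of shape (n_groups, n_bars) becomes chart data of shape
# (n_bars, 1, n_groups).
def _shape_conversion_demo():
    import numpy as np
    data = np.arange(12).reshape(3, 4)              # 3 groups, 4 bars each
    chartdata = np.transpose([data], axes=(2, 0, 1))
    return data.shape, chartdata.shape              # (3, 4) -> (4, 1, 3)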
if __name__ == '__main__':
from random import randrange
import random, sys
dim = 3
number = 5
args = sys.argv[1:]
if len(args) > 3:
sys.exit("invalid number of arguments")
elif len(args) > 0:
myshape = [ int(x) for x in args ]
else:
myshape = [ 3, 4, 8 ]
# generate a data matrix of the given shape
size = reduce(lambda x,y: x*y, myshape)
#data = [ random.randrange(size - i) + 10 for i in xrange(size) ]
data = [ float(i)/100.0 for i in xrange(size) ]
data = reshape(data, myshape)
# setup some test bar charts
if True:
chart1 = BarChart()
chart1.data = data
chart1.xlabel = 'Benchmark'
chart1.ylabel = 'Bandwidth (GBps)'
chart1.legend = [ 'x%d' % x for x in xrange(myshape[-1]) ]
chart1.xticks = [ 'xtick%d' % x for x in xrange(myshape[0]) ]
chart1.title = 'this is the title'
if len(myshape) > 2:
chart1.xsubticks = [ '%d' % x for x in xrange(myshape[1]) ]
chart1.graph()
chart1.savefig('/tmp/test1.png')
chart1.savefig('/tmp/test1.ps')
chart1.savefig('/tmp/test1.eps')
chart1.savecsv('/tmp/test1.csv')
if False:
chart2 = BarChart()
chart2.data = data
chart2.colormap = 'gray'
chart2.graph()
chart2.savefig('/tmp/test2.png')
chart2.savefig('/tmp/test2.ps')
# pylab.show()
| bsd-3-clause |
waterponey/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 55 | 7386 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
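# A minimal sketch of the index lookup used above (illustrative helper, not
# used elsewhere in this example): np.searchsorted maps point coordinates onto
# indices of the coverage grids.
def _coords_to_grid_indices(xgrid, ygrid, lons, lats):
    ix = np.searchsorted(xgrid, lons)
    iy = np.searchsorted(ygrid, lats)
    return ix, iy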
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
HIPS/autograd | examples/data.py | 3 | 2765 | from __future__ import absolute_import
import matplotlib.pyplot as plt
import matplotlib.image
import autograd.numpy as np
import autograd.numpy.random as npr
import data_mnist
def load_mnist():
partial_flatten = lambda x : np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
one_hot = lambda x, k: np.array(x[:,None] == np.arange(k)[None, :], dtype=int)
train_images, train_labels, test_images, test_labels = data_mnist.mnist()
train_images = partial_flatten(train_images) / 255.0
test_images = partial_flatten(test_images) / 255.0
train_labels = one_hot(train_labels, 10)
test_labels = one_hot(test_labels, 10)
N_data = train_images.shape[0]
return N_data, train_images, train_labels, test_images, test_labels
def plot_images(images, ax, ims_per_row=5, padding=5, digit_dimensions=(28, 28),
cmap=matplotlib.cm.binary, vmin=None, vmax=None):
"""Images should be a (N_images x pixels) matrix."""
N_images = images.shape[0]
N_rows = (N_images - 1) // ims_per_row + 1
pad_value = np.min(images.ravel())
concat_images = np.full(((digit_dimensions[0] + padding) * N_rows + padding,
(digit_dimensions[1] + padding) * ims_per_row + padding), pad_value)
for i in range(N_images):
cur_image = np.reshape(images[i, :], digit_dimensions)
row_ix = i // ims_per_row
col_ix = i % ims_per_row
row_start = padding + (padding + digit_dimensions[0]) * row_ix
col_start = padding + (padding + digit_dimensions[1]) * col_ix
concat_images[row_start: row_start + digit_dimensions[0],
col_start: col_start + digit_dimensions[1]] = cur_image
cax = ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
return cax
def save_images(images, filename, **kwargs):
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
plot_images(images, ax, **kwargs)
fig.patch.set_visible(False)
ax.patch.set_visible(False)
plt.savefig(filename)
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
rs=npr.RandomState(0)):
"""Based on code by Ryan P. Adams."""
rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)
features = rs.randn(num_classes*num_per_class, 2) \
* np.array([radial_std, tangential_std])
features[:, 0] += 1
labels = np.repeat(np.arange(num_classes), num_per_class)
angles = rads[labels] + rate * np.exp(features[:,0])
rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
rotations = np.reshape(rotations.T, (-1, 2, 2))
return np.einsum('ti,tij->tj', features, rotations)
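# A minimal visual check of the pinwheel generator (parameter values below are
# illustrative assumptions, not taken from any experiment).
if __name__ == '__main__':
    pts = make_pinwheel(radial_std=0.3, tangential_std=0.05, num_classes=3,
                        num_per_class=100, rate=0.25)
    plt.scatter(pts[:, 0], pts[:, 1], s=4)
    plt.axis('equal')
    plt.show()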
| mit |
kchodorow/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
| apache-2.0 |
matplotlib/mpl-probscale | probscale/transforms.py | 1 | 3851 | import numpy
from matplotlib.transforms import Transform
def _mask_out_of_bounds(a):
"""
Return a Numpy array where all values outside ]0, 1[ are
replaced with NaNs. If all values are inside ]0, 1[, the original
array is returned.
"""
a = numpy.array(a, float)
mask = (a <= 0.0) | (a >= 1.0)
if mask.any():
return numpy.where(mask, numpy.nan, a)
return a
def _clip_out_of_bounds(a):
"""
Return a Numpy array where all values outside ]0, 1[ are
replaced with eps or 1 - eps. If all values are inside ]0, 1[
the original array is returned. (eps = 1e-300)
"""
a = numpy.array(a, float)
a[a <= 0.0] = 1e-300
a[a >= 1.0] = 1 - 1e-300
return a
class _ProbTransformMixin(Transform):
"""
Mixin for MPL axes transform for quantiles/probabilities or
percentages.
"""
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, dist, as_pct=True, out_of_bounds='mask'):
Transform.__init__(self)
self.dist = dist
self.as_pct = as_pct
self.out_of_bounds = out_of_bounds
if self.as_pct:
self.factor = 100.0
else:
self.factor = 1.0
if self.out_of_bounds == 'mask':
self._handle_out_of_bounds = _mask_out_of_bounds
elif self.out_of_bounds == 'clip':
self._handle_out_of_bounds = _clip_out_of_bounds
else:
raise ValueError("`out_of_bounds` must be either 'mask' or 'clip'")
class ProbTransform(_ProbTransformMixin):
"""
MPL axes transform class to convert quantiles to probabilities
or percents.
Parameters
----------
dist : scipy.stats distribution
The distribution whose ``cdf`` and ``ppf`` methods will set the
scale of the axis.
as_pct : bool, optional (True)
Toggles the formatting of the probabilities associated with the
tick labels as percentages (0 - 100) or fractions (0 - 1).
out_of_bounds : string, optional ('mask' or 'clip')
Determines how data outside the range of valid values is
handled. The default behavior is to mask the data.
Alternatively, the data can be clipped to values arbitrarily
close to the limits of the scale.
"""
def transform_non_affine(self, prob):
with numpy.errstate(divide="ignore", invalid="ignore"):
prob = self._handle_out_of_bounds(
numpy.asarray(prob) / self.factor
)
q = self.dist.ppf(prob)
return q
def inverted(self):
return QuantileTransform(self.dist, as_pct=self.as_pct,
out_of_bounds=self.out_of_bounds)
class QuantileTransform(_ProbTransformMixin):
"""
MPL axes transform class to convert probabilities or percents to
quantiles.
Parameters
----------
dist : scipy.stats distribution
The distribution whose ``cdf`` and ``ppf`` methods will set the
scale of the axis.
as_pct : bool, optional (True)
Toggles the formatting of the probabilities associated with the
tick labels as percentages (0 - 100) or fractions (0 - 1).
out_of_bounds : string, optional ('mask' or 'clip')
Determines how data outside the range of valid values is
handled. The default behavior is to mask the data.
Alternatively, the data can be clipped to values arbitrarily
close to the limits of the scale.
"""
def transform_non_affine(self, q):
with numpy.errstate(divide="ignore", invalid="ignore"):
prob = self.dist.cdf(q) * self.factor
return prob
def inverted(self):
return ProbTransform(self.dist, as_pct=self.as_pct,
out_of_bounds=self.out_of_bounds)
| bsd-3-clause |
cuemacro/chartpy | chartpy_examples/dashboard_examples/layoutchart.py | 1 | 4000 | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2021 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas as pd
import datetime
import math
import quandl
from chartpy import Chart, Style
from chartpy.dashboard import LayoutCanvas, CallbackManager
class LayoutChart(LayoutCanvas):
def __init__(self, app=None, constants=None, quandl_api_key=None):
super().__init__(app=app, constants=constants)
self._callback_manager = CallbackManager(constants=constants)
self._drop_down_width = 120
self._quandl_api_key = quandl_api_key
quandl.ApiConfig.api_key = self._quandl_api_key
self.attach_callbacks()
def attach_callbacks(self):
output = self._callback_manager.output_callback(self.page_id(),
['spot-fig',
'vol-fig',
'msg-status'])
input = self._callback_manager.input_callback(self.page_id(), 'calculate-button')
state = self._callback_manager.state_callback(self.page_id(), ['ticker-val'])
self._app.callback(*output, *input, *state)(self.calculate_button)
def calculate_button(self, *args):
n_clicks, ticker = args
if ticker == '': return {}, {}, "Here is an example of using chartpy with dash"
try:
df = pd.DataFrame(quandl.get(ticker))
df_vol = (df / df.shift(1)).rolling(window=20).std()* math.sqrt(252) * 100.0
spot_fig = Chart(engine="plotly").plot(df,
style=Style(title='Spot', plotly_plot_mode='dash', width=980, height=480, scale_factor=-1))
vol_fig = Chart(engine="plotly").plot(df_vol,
style=Style(title='Realized Vol 1M', plotly_plot_mode='dash', width=980,
height=480, scale_factor=-1))
msg = "Plotted " + ticker + " at " + datetime.datetime.utcnow().strftime("%b %d %Y %H:%M:%S")
return spot_fig, vol_fig, msg
except Exception as e:
print(str(e))
pass
return {}, {}, "Failed to download"
def page_name(self):
return "Example"
def page_id(self):
return "example"
def construct_layout(self):
return self._sc.extra_width_row_cell(
[
self._sc.header_bar(
self.page_name(), img='logo.png', id=['msg-status', 'help-status'], prefix_id=self.page_id(),
description=['Here is an example of using chartpy with dash',
"Redrawn at " + datetime.datetime.utcnow().strftime("%b %d %Y %H:%M:%S")]),
self._sc.horizontal_bar(),
self._sc.row_cell([self._sc.inputbox(caption='Quandl Ticker', id='ticker-val',
prefix_id=self.page_id(),
start_values='FRED/DEXUSEU',
width=980)]),
self._sc.horizontal_bar(),
self._sc.button(caption='Calculate', id='calculate-button', prefix_id=self.page_id()),
self._sc.horizontal_bar(),
self._sc.plot("Quandl Plot", id=['spot-fig', 'vol-fig'], prefix_id=self.page_id(), height=500),
]
)
| apache-2.0 |
fboers/jumeg | jumeg/jumeg_volmorpher.py | 1 | 54621 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Authors: Daniel van de Velden ([email protected])
#
# License: BSD (3-clause)
import os
import os.path as op
import time as time2
import mne
import numpy as np
from mne.transforms import (rotation, rotation3d, scaling,
translation, apply_trans)
from mne.source_space import _get_lut, _get_lut_id, _get_mgz_header
from mne.source_estimate import _write_stc
import matplotlib.pyplot as plt
from mne.source_estimate import VolSourceEstimate
# from matplotlib import cm
from matplotlib.ticker import LinearLocator
from scipy.optimize import leastsq
from scipy import linalg
from scipy.spatial.distance import cdist
from sklearn.neighbors import BallTree
from scipy.interpolate import griddata
import nibabel as nib
from functools import reduce
from jumeg.jumeg_utils import loadingBar
from jumeg.jumeg_volume_plotting import plot_vstc
import logging
logger = logging.getLogger("root")
# =============================================================================
#
# =============================================================================
def convert_to_unicode(inlist):
if type(inlist) != str:
inlist = inlist.decode('utf-8')
return inlist
else:
return inlist
def read_vert_labelwise(fname_src, subject, subjects_dir):
"""Read the labelwise vertice file and remove duplicates.
Parameters
----------
fname_src : string
Path to a source space file.
subject : str | None
The subject name. It is necessary to set the
subject parameter to avoid analysis errors.
subjects_dir : string
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
label_dict : dict
A dict containing all labels available for the subject's source space
and their respective vertex indices
"""
fname_labels = fname_src[:-4] + '_vertno_labelwise.npy'
label_dict = np.load(fname_labels, encoding='latin1').item()
subj_vert_src = mne.read_source_spaces(fname_src)
label_dict = _remove_vert_duplicates(subject, subj_vert_src, label_dict,
subjects_dir)
del subj_vert_src
return label_dict
def _point_cloud_error_balltree(subj_p, temp_tree):
"""Find the distance from each source point to its closest target point.
Uses sklearn.neighbors.BallTree for greater efficiency"""
dist, _ = temp_tree.query(subj_p)
err = dist.ravel()
return err
def _point_cloud_error(src_pts, tgt_pts):
"""Find the distance from each source point to its closest target point.
Parameters."""
y = cdist(src_pts, tgt_pts, 'euclidean')
dist = y.min(axis=1)
return dist
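# A quick numerical illustration of the euclidean variant (values are made up):
# two source points matched against a single target point.
def _point_cloud_error_demo():
    src = np.array([[0., 0., 0.], [1., 0., 0.]])
    tgt = np.array([[0., 0., 1.]])
    # nearest-target distances: [1.0, sqrt(2) ~ 1.414]
    return _point_cloud_error(src, tgt)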
def _trans_from_est(params):
"""Convert transformation parameters into a transformation matrix."""
i = 0
trans = []
x, y, z = params[:3]
trans.insert(0, translation(x, y, z))
i += 3
x, y, z = params[i:i + 3]
trans.append(rotation(x, y, z))
i += 3
x, y, z = params[i:i + 3]
trans.append(scaling(x, y, z))
i += 3
trans = reduce(np.dot, trans)
return trans
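# A small illustration (values are made up): nine parameters
# (tx, ty, tz, rx, ry, rz, sx, sy, sz) are combined into one 4x4 matrix;
# with zero rotation and unit scaling the result is a pure translation.
def _trans_from_est_demo():
    params = np.array([1., 2., 3., 0., 0., 0., 1., 1., 1.])
    return _trans_from_est(params)  # translation by (1, 2, 3)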
def _get_scaling_factors(s_pts, t_pts):
"""
Calculate scaling factors to match the size of the subject
brain and the template brain.
Parameters:
----------
s_pts : np.array
Coordinates of the vertices in a given label from the
subject source space.
t_pts : np.array
Coordinates of the vertices in a given label from the
template source space.
Returns:
--------
x_scale, y_scale, z_scale : float
Scaling factors along the x-, y-, and z-axis that match the extent
of the subject label to the extent of the template label.
"""
# Get the x-,y-,z- min and max Limits to create the span for each axis
s_x, s_y, s_z = s_pts.T
s_x_diff = np.max(s_x) - np.min(s_x)
s_y_diff = np.max(s_y) - np.min(s_y)
s_z_diff = np.max(s_z) - np.min(s_z)
t_x, t_y, t_z = t_pts.T
t_x_diff = np.max(t_x) - np.min(t_x)
t_y_diff = np.max(t_y) - np.min(t_y)
t_z_diff = np.max(t_z) - np.min(t_z)
# Calculate a scaling factor for the subject to match template size
# and avoid 'Nan' by zero division
# instead of comparing float with zero, check absolute value up to a given precision
precision = 1e-18
if np.fabs(t_x_diff) < precision or np.fabs(s_x_diff) < precision:
x_scale = 0.
else:
x_scale = t_x_diff / s_x_diff
if np.fabs(t_y_diff) < precision or np.fabs(s_y_diff) < precision:
y_scale = 0.
else:
y_scale = t_y_diff / s_y_diff
if np.fabs(t_z_diff) < precision or np.fabs(s_z_diff) < precision:
z_scale = 0.
else:
z_scale = t_z_diff / s_z_diff
return x_scale, y_scale, z_scale
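# A small illustration (values are made up): a subject label spanning
# 1 x 2 x 4 units matched to a template label spanning 2 x 2 x 2 units
# gives the scaling factors (2.0, 1.0, 0.5).
def _get_scaling_factors_demo():
    s_pts = np.array([[0., 0., 0.], [1., 2., 4.]])
    t_pts = np.array([[0., 0., 0.], [2., 2., 2.]])
    return _get_scaling_factors(s_pts, t_pts)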
def _get_best_trans_matrix(init_trans, s_pts, t_pts, template_spacing, e_func):
"""
Calculate the least squares error for different variations of
the initial transformation and return the transformation with
the minimum error
Parameters:
-----------
init_trans : np.array of shape (4, 4)
Numpy array containing the initial transformation matrix.
s_pts : np.array
Coordinates of the vertices in a given label from the
subject source space.
t_pts : np.array
Coordinates of the vertices in a given label from the
template source space.
template_spacing : float
Grid spacing for the template source space.
e_func : str
Either 'balltree' or 'euclidean'.
Returns:
--------
trans : np.array of shape (4, 4)
The transformation matrix yielding the smallest mean distance error.
err_stats : [dist_mean, dist_max, dist_var, dist_err]
The error statistics of the returned transformation.
"""
if e_func == 'balltree':
errfunc = _point_cloud_error_balltree
temp_tree = BallTree(t_pts)
else:
# e_func == 'euclidean'
errfunc = _point_cloud_error
temp_tree = None
# Find calculate the least squares error for variation of the initial transformation
poss_trans = find_optimum_transformations(init_trans, s_pts, t_pts, template_spacing,
e_func, temp_tree, errfunc)
dist_max_list = []
dist_mean_list = []
dist_var_list = []
dist_err_list = []
for tra in poss_trans:
points_to_match = s_pts
points_to_match = apply_trans(tra, points_to_match)
if e_func == 'balltree':
template_pts = temp_tree
else:
# e_func == 'euclidean'
template_pts = t_pts
dist_mean_list.append(np.mean(errfunc(points_to_match[:, :3], template_pts)))
dist_var_list.append(np.var(errfunc(points_to_match[:, :3], template_pts)))
dist_max_list.append(np.max(errfunc(points_to_match[:, :3], template_pts)))
dist_err_list.append(errfunc(points_to_match[:, :3], template_pts))
del points_to_match
dist_mean_arr = np.asarray(dist_mean_list)
# Select the best fitting Transformation-Matrix
# (casting as int not necessary but avoids warning in pycharm)
idx1 = int(np.argmin(dist_mean_arr))
# Collect all values belonging to the optimum solution
trans = poss_trans[idx1]
dist_max = dist_max_list[idx1]
dist_mean = dist_mean_list[idx1]
dist_var = dist_var_list[idx1]
dist_err = dist_err_list[idx1]
del poss_trans
del dist_mean_arr
del dist_mean_list
del dist_max_list
del dist_var_list
del dist_err_list
err_stats = [dist_mean, dist_max, dist_var, dist_err]
return trans, err_stats
def auto_match_labels(fname_subj_src, label_dict_subject,
fname_temp_src, label_dict_template,
subjects_dir, volume_labels, template_spacing,
e_func, fname_save, save_trans=False):
"""
Matches a subject's volume source space labelwise to another volume
source space
Parameters
----------
fname_subj_src : string
Filename of the first volume source space.
label_dict_subject : dict
Dictionary containing all labels and the numbers of the
vertices belonging to these labels for the subject.
fname_temp_src : string
Filename of the second volume source space to match on.
label_dict_template : dict
Dictionary containing all labels and the numbers of the
vertices belonging to these labels for the template.
volume_labels : list of volume Labels
List of the volume labels of interest
subjects_dir : str
Path to the subject directory.
template_spacing : int | float
The grid distances of the second volume source space in mm
e_func : string | None
Error function, either 'balltree' or 'euclidean'. If None, the
default 'balltree' function is used.
fname_save : str
File name under which the transformation matrix is to be saved.
save_trans : bool
If it is True the transformation matrix for each label is saved
as a dictionary. False is default
Returns
-------
label_trans_dic : dict
Dictionary with the transformation matrix for each label
label_trans_dic_err : dict
Dictionary with the distance errors (mm) of each label's
transformation matrix
label_trans_dic_mean_dist : dict
Dictionary with the mean distance (mm) of each label's
transformation matrix
label_trans_dic_max_dist : dict
Dictionary with the maximum distance (mm) of each label's
transformation matrix
label_trans_dic_var_dist : dict
Dictionary with the distance error variance (mm) of each label's
transformation matrix
"""
if e_func == 'balltree':
err_function = 'BallTree Error Function'
elif e_func == 'euclidean':
err_function = 'Euclidean Error Function'
else:
print('No or invalid error function provided, using BallTree instead')
err_function = 'BallTree Error Function'
subj_src = mne.read_source_spaces(fname_subj_src)
x, y, z = subj_src[0]['rr'].T
# subj_p contains the coordinates of the vertices
subj_p = np.c_[x, y, z]
subject = subj_src[0]['subject_his_id']
temp_src = mne.read_source_spaces(fname_temp_src)
x1, y1, z1 = temp_src[0]['rr'].T
# temp_p contains the coordinates of the vertices
temp_p = np.c_[x1, y1, z1]
template = temp_src[0]['subject_his_id']
print("""\n#### Attempting to match %d volume source space labels from
Subject: '%s' to Template: '%s' with
%s...""" % (len(volume_labels), subject, template, err_function))
# make sure to remove duplicate vertices before matching
label_dict_subject = _remove_vert_duplicates(subject, subj_src, label_dict_subject,
subjects_dir)
label_dict_template = _remove_vert_duplicates(template, temp_src, label_dict_template,
subjects_dir)
vert_sum = 0
vert_sum_temp = 0
for label_i in volume_labels:
vert_sum = vert_sum + label_dict_subject[label_i].shape[0]
vert_sum_temp = vert_sum_temp + label_dict_template[label_i].shape[0]
# check for overlapping labels
for label_j in volume_labels:
if label_i != label_j:
h = np.intersect1d(label_dict_subject[label_i], label_dict_subject[label_j])
if h.shape[0] > 0:
raise ValueError("Label %s contains %d vertices from label %s" % (label_i,
h.shape[0],
label_j))
print(' # N subject vertices:', vert_sum)
print(' # N template vertices:', vert_sum_temp)
# Prepare empty containers to store the possible transformation results
label_trans_dic = {}
label_trans_dic_err = {}
label_trans_dic_var_dist = {}
label_trans_dic_mean_dist = {}
label_trans_dic_max_dist = {}
start_time = time2.time()
del subj_src, temp_src
for label_idx, label in enumerate(volume_labels):
loadingBar(count=label_idx, total=len(volume_labels),
task_part='%s' % label)
print('')
# Select coords for label and check if they exceed the label size limit
s_pts = subj_p[label_dict_subject[label]]
t_pts = temp_p[label_dict_template[label]]
        # Note: the error function in find_optimum_transformations needs at
        # least 6 points; if the label has fewer, its points are duplicated
        # below until that minimum is reached.
if s_pts.shape[0] == 0:
raise ValueError("The label does not contain any vertices for the subject.")
elif s_pts.shape[0] < 6:
while s_pts.shape[0] < 6:
s_pts = np.concatenate((s_pts, s_pts))
if t_pts.shape[0] == 0:
# Append the Dictionaries with the zeros since there is no label to
# match the points
trans = _trans_from_est(np.zeros([9, 1]))
trans[0, 0], trans[1, 1], trans[2, 2] = 1., 1., 1.
label_trans_dic.update({label: trans})
            label_trans_dic_mean_dist.update({label: 0})
            label_trans_dic_max_dist.update({label: 0})
            label_trans_dic_var_dist.update({label: 0})
label_trans_dic_err.update({label: 0})
else:
# Calculate a scaling factor for the subject to match template size
x_scale, y_scale, z_scale = _get_scaling_factors(s_pts, t_pts)
# Find center of mass
cm_s = np.mean(s_pts, axis=0)
cm_t = np.mean(t_pts, axis=0)
initial_transl = (cm_t - cm_s)
            # Create the initial transformation matrix
init_trans = np.zeros([4, 4])
init_trans[:3, :3] = rotation3d(0., 0., 0.) * [x_scale, y_scale, z_scale]
init_trans[0, 3] = initial_transl[0]
init_trans[1, 3] = initial_transl[1]
init_trans[2, 3] = initial_transl[2]
init_trans[3, 3] = 1.
# Calculate the least squares error for different variations of
# the initial transformation and return the transformation with
# the minimum error
trans, err_stats = _get_best_trans_matrix(init_trans, s_pts, t_pts,
template_spacing, e_func)
# TODO: test that the results are still the same
[dist_mean, dist_max, dist_var, dist_err] = err_stats
# Append the Dictionaries with the result and error values
label_trans_dic.update({label: trans})
label_trans_dic_mean_dist.update({label: dist_mean})
label_trans_dic_max_dist.update({label: dist_max})
label_trans_dic_var_dist.update({label: dist_var})
label_trans_dic_err.update({label: dist_err})
if save_trans:
print('\n Writing Transformation matrices to file..')
fname_lw_trans = fname_save
mat_mak_trans_dict = dict()
mat_mak_trans_dict['ID'] = '%s -> %s' % (subject, template)
mat_mak_trans_dict['Labeltransformation'] = label_trans_dic
mat_mak_trans_dict['Transformation Error[mm]'] = label_trans_dic_err
mat_mak_trans_dict['Mean Distance Error [mm]'] = label_trans_dic_mean_dist
mat_mak_trans_dict['Max Distance Error [mm]'] = label_trans_dic_max_dist
mat_mak_trans_dict['Distance Variance Error [mm]'] = label_trans_dic_var_dist
mat_mak_trans_dict_arr = np.array([mat_mak_trans_dict])
np.save(fname_lw_trans, mat_mak_trans_dict_arr)
print(' [done] -> Calculation Time: %.2f minutes.' % (
((time2.time() - start_time) / 60)))
return
else:
return (label_trans_dic, label_trans_dic_err, label_trans_dic_mean_dist,
label_trans_dic_max_dist, label_trans_dic_var_dist)
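# A minimal usage sketch for auto_match_labels (the file names, label dictionaries
# and label list below are assumptions for illustration only, not part of the module):
#
#     (label_trans_dic, label_trans_dic_err, label_trans_dic_mean_dist,
#      label_trans_dic_max_dist, label_trans_dic_var_dist) = auto_match_labels(
#         fname_subj_src='subject-vol-5.00-src.fif', label_dict_subject=subj_label_dict,
#         fname_temp_src='fsaverage-vol-5.00-src.fif', label_dict_template=temp_label_dict,
#         subjects_dir=subjects_dir, volume_labels=['Left-Hippocampus', 'Right-Hippocampus'],
#         template_spacing=5., e_func='balltree',
#         fname_save='subject_fsaverage_vol-5.00_lw-trans.npy', save_trans=False)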
def find_optimum_transformations(init_trans, s_pts, t_pts, template_spacing,
e_func, temp_tree, errfunc):
"""
Vary the initial transformation by a translation of up to three times the
grid spacing and compute the transformation with the smallest least square
error.
Parameters:
-----------
init_trans : 4-D transformation matrix
Initial guess of the transformation matrix from the subject brain to
the template brain.
s_pts :
Vertex coordinates in the subject brain.
t_pts :
Vertex coordinates in the template brain.
template_spacing : float
Grid spacing of the vertices in the template brain.
e_func : str
        Error function to use. Either 'balltree' or 'euclidean'.
temp_tree :
BallTree(t_pts) if e_func is 'balltree'.
errfunc :
The error function for the computation of the least squares error.
Returns:
--------
    poss_trans : list of 4-D transformation matrices
        List containing, for each variation of the initial transformation,
        the transformation matrix with the smallest least squares error.
"""
# template spacing in meters
tsm = template_spacing / 1e3
# Try different initial translations in space to avoid local minima
# No label should require a translation by more than 3 times the grid spacing (tsm)
auto_match_iters = np.array([[0., 0., 0.],
[0., 0., tsm], [0., 0., tsm * 2], [0., 0., tsm * 3],
[tsm, 0., 0.], [tsm * 2, 0., 0.], [tsm * 3, 0., 0.],
[0., tsm, 0.], [0., tsm * 2, 0.], [0., tsm * 3, 0.],
[0., 0., -tsm], [0., 0., -tsm * 2], [0., 0., -tsm * 3],
[-tsm, 0., 0.], [-tsm * 2, 0., 0.], [-tsm * 3, 0., 0.],
[0., -tsm, 0.], [0., -tsm * 2, 0.], [0., -tsm * 3, 0.]])
# possible translation matrices
poss_trans = []
for p, ami in enumerate(auto_match_iters):
# vary the initial translation value by adding ami
tx, ty, tz = init_trans[0, 3] + ami[0], init_trans[1, 3] + ami[1], init_trans[2, 3] + ami[2]
sx, sy, sz = init_trans[0, 0], init_trans[1, 1], init_trans[2, 2]
rx, ry, rz = 0, 0, 0
# starting point for finding the transformation matrix trans which
# minimizes the error between np.dot(s_pts, trans) and t_pts
x0 = np.array([tx, ty, tz, rx, ry, rz])
def error(x):
tx_, ty_, tz_, rx_, ry_, rz_ = x
trans0 = np.zeros([4, 4])
trans0[:3, :3] = rotation3d(rx_, ry_, rz_) * [sx, sy, sz]
trans0[0, 3] = tx_
trans0[1, 3] = ty_
trans0[2, 3] = tz_
# rotate and scale
estim = np.dot(s_pts, trans0[:3, :3].T)
# translate
estim += trans0[:3, 3]
if e_func == 'balltree':
err = errfunc(estim[:, :3], temp_tree)
else:
# e_func == 'euclidean'
err = errfunc(estim[:, :3], t_pts)
return err
est, _, info, msg, _ = leastsq(error, x0, full_output=True)
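        # leastsq only optimizes translation and rotation; re-append the fixed
        # scaling factors so that _trans_from_est can build the full 4x4 matrix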
est = np.concatenate((est, (init_trans[0, 0],
init_trans[1, 1],
init_trans[2, 2])
))
trans = _trans_from_est(est)
poss_trans.append(trans)
return poss_trans
def _transform_src_lw(vsrc_subject_from, label_dict_subject_from,
                      volume_labels, subject_to,
                      subjects_dir, label_trans_dic=None):
    """Transform the given labels of interest from one subject's source space to another.
Parameters
----------
vsrc_subject_from : instance of SourceSpaces
The source spaces that will be transformed.
    label_dict_subject_from : dict
        Dictionary containing all labels and the vertices belonging to
        these labels for the subject to transform from.
    volume_labels : list
List of the volume labels of interest
subject_to : str | None
The template subject.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
label_trans_dic : dict | None
Dictionary containing transformation matrices for all labels (acquired
by auto_match_labels function). If label_trans_dic is None the method
will attempt to read the file from disc.
Returns
-------
transformed_p : array
Transformed points from subject volume source space to volume source
space of the template subject.
idx_vertices : array
Array of idxs for all transformed vertices in the volume source space.
"""
subj_vol = vsrc_subject_from
subject = subj_vol[0]['subject_his_id']
x, y, z = subj_vol[0]['rr'].T
subj_p = np.c_[x, y, z]
label_dict = label_dict_subject_from
print("""\n#### Attempting to transform %s source space labelwise to
%s source space..""" % (subject, subject_to))
if label_trans_dic is None:
print('\n#### Attempting to read MatchMaking Transformations from file..')
indiv_spacing = (np.abs(subj_vol[0]['rr'][0, 0]) -
np.abs(subj_vol[0]['rr'][1, 0])) * 1e3
fname_lw_trans = op.join(subjects_dir, subject,
'%s_%s_vol-%.2f_lw-trans.npy' % (subject, subject_to,
indiv_spacing))
try:
mat_mak_trans_dict_arr = np.load(fname_lw_trans, encoding='latin1')
except IOError:
print('MatchMaking Transformations file NOT found:')
print(fname_lw_trans, '\n')
print('Please calculate the transformation matrix dictionary by using')
print('the jumeg.jumeg_volmorpher.auto_match_labels function.')
import sys
sys.exit(-1)
label_trans_id = mat_mak_trans_dict_arr[0]['ID']
print(' Reading MatchMaking file %s..' % label_trans_id)
label_trans_dic = mat_mak_trans_dict_arr[0]['Labeltransformation']
else:
label_trans_dic = label_trans_dic
vert_sum = []
for label_i in volume_labels:
vert_sum.append(label_dict[label_i].shape[0])
for label_j in volume_labels:
if label_i != label_j:
h = np.intersect1d(label_dict[label_i], label_dict[label_j])
if h.shape[0] > 0:
print("In Label:", label_i, """ are vertices from
Label:""", label_j, "(", h.shape[0], ")")
break
transformed_p = np.array([[0, 0, 0]])
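    # start with a dummy row so that np.concatenate works inside the loop;
    # the dummy row is removed again after the loop (transformed_p[1:])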
idx_vertices = []
for idx, label in enumerate(volume_labels):
loadingBar(idx, len(volume_labels), task_part=label)
idx_vertices.append(label_dict[label])
trans_p = subj_p[label_dict[label]]
trans = label_trans_dic[label]
# apply trans
trans_p = apply_trans(trans, trans_p)
del trans
transformed_p = np.concatenate((transformed_p, trans_p))
del trans_p
transformed_p = transformed_p[1:]
idx_vertices = np.concatenate(np.asarray(idx_vertices))
print(' [done]')
return transformed_p, idx_vertices
def set_unwanted_to_zero(vsrc, stc_data, volume_labels, label_dict):
"""
Parameters:
-----------
vsrc : mne.VolSourceSpace
stc_data : np.array
data from source time courses.
volume_labels : list of str
List with volume labels of interest
label_dict : dict
Dictionary containing for each label the indices of the
vertices which are part of the label.
Returns:
--------
stc_data_mod : np.array()
The modified stc_data array with data set to zero for
vertices which are not part of the labels of interest.
"""
# label of interest
loi_idx = list()
for p, labels in enumerate(volume_labels):
label_verts = label_dict[labels]
for i in range(0, label_verts.shape[0]):
loi_idx.append(np.where(label_verts[i] == vsrc[0]['vertno']))
loi_idx = np.asarray(loi_idx)
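    # loi_idx now holds the row indices (into stc_data) of all vertices that
    # belong to one of the labels of interest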
stc_data_mod = np.zeros(stc_data.shape)
stc_data_mod[loi_idx, :] = stc_data[loi_idx, :]
return stc_data_mod
def volume_morph_stc(fname_stc_orig, subject_from, fname_vsrc_subject_from,
volume_labels, subject_to, fname_vsrc_subject_to,
cond, interpolation_method, normalize, subjects_dir,
unwanted_to_zero=True, label_trans_dic=None, run=None,
n_iter=None, fname_save_stc=None, save_stc=False, plot=False):
"""
Perform volume morphing from one subject to a template.
Parameters
----------
fname_stc_orig : string
Filepath of the original stc
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR
fname_vsrc_subject_from : str
Filepath of the subjects volume source space
volume_labels : list of volume Labels
List of the volume labels of interest
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
fname_vsrc_subject_to : string
Filepath of the template subjects volume source space
    cond : str | None
        Experimental condition under which the data was recorded (only used
        for log messages and plot titles).
    interpolation_method : str
        Interpolation method passed on to scipy's griddata. Only 'linear'
        seems to be working for 3D data; 'balltree' and 'euclidean' only
        seem to work for 2D data.
normalize : bool
If True, normalize activity patterns label by label before and after
morphing.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
unwanted_to_zero : bool
If True, set all non-Labels-of-interest in resulting stc to zero.
label_trans_dic : dict | None
Dictionary containing transformation matrices for all labels (acquired
by auto_match_labels function). If label_trans_dic is None the method
will attempt to read the file from disc.
run : int | None
Specifies the run if multiple measurements for the same condition
were performed.
n_iter : int | None
If MFT was used for the inverse solution, n_iter is the
number of iterations.
fname_save_stc : str | None
File name for the morphed volume stc file to be saved under.
If fname_save_stc is None, use the standard file name convention.
save_stc : bool
True to save. False is default
plot : bool
Plot the morphed stc.
Returns
-------
In Case of save_stc=True:
stc_morphed : VolSourceEstimate
Volume source estimate for the destination subject.
In Case of save_stc=False:
new_data : dict
One or more new stc data array
"""
print('#### START ####')
print('#### Volume Morphing ####')
if cond is None:
str_cond = ''
else:
str_cond = ' | Cond.: %s' % cond
if run is None:
str_run = ''
else:
str_run = ' | Run: %d' % run
if n_iter is None:
str_niter = ''
else:
str_niter = ' | Iter. :%d' % n_iter
string = ' Subject: %s' % subject_from + str_run + str_cond + str_niter
print(string)
print('\n#### Reading essential data files..')
# STC
stc_orig = mne.read_source_estimate(fname_stc_orig)
stcdata = stc_orig.data
nvert, ntimes = stc_orig.shape
tmin, tstep = stc_orig.times[0], stc_orig.tstep
# Source Spaces
subj_vol = mne.read_source_spaces(fname_vsrc_subject_from)
temp_vol = mne.read_source_spaces(fname_vsrc_subject_to)
###########################################################################
# get dictionaries with labels and their respective vertices
###########################################################################
fname_label_dict_subject_from = (fname_vsrc_subject_from[:-4] +
'_vertno_labelwise.npy')
label_dict_subject_from = np.load(fname_label_dict_subject_from,
encoding='latin1').item()
fname_label_dict_subject_to = (fname_vsrc_subject_to[:-4] +
'_vertno_labelwise.npy')
label_dict_subject_to = np.load(fname_label_dict_subject_to,
encoding='latin1').item()
# Check for vertex duplicates
label_dict_subject_from = _remove_vert_duplicates(subject_from, subj_vol,
label_dict_subject_from,
subjects_dir)
###########################################################################
# Labelwise transform the whole subject source space
###########################################################################
transformed_p, idx_vertices = _transform_src_lw(subj_vol,
label_dict_subject_from,
volume_labels, subject_to,
subjects_dir,
label_trans_dic)
xn, yn, zn = transformed_p.T
stcdata_sel = []
for p, i in enumerate(idx_vertices):
stcdata_sel.append(np.where(idx_vertices[p] == subj_vol[0]['vertno']))
stcdata_sel = np.asarray(stcdata_sel).flatten()
stcdata_ch = stcdata[stcdata_sel]
###########################################################################
# Interpolate the data
###########################################################################
print('\n#### Attempting to interpolate STC Data for every time sample..')
print(' Interpolation method: ', interpolation_method)
st_time = time2.time()
xt, yt, zt = temp_vol[0]['rr'][temp_vol[0]['inuse'].astype(bool)].T
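    # coordinates of the template vertices in use; these are the target
    # points for the interpolation below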
inter_data = np.zeros([xt.shape[0], ntimes])
for i in range(0, ntimes):
loadingBar(i, ntimes, task_part='Time slice: %i' % (i + 1))
inter_data[:, i] = griddata((xn, yn, zn), stcdata_ch[:, i], (xt, yt, zt),
method=interpolation_method, rescale=True)
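    # griddata with method='linear' returns NaN for target points outside the
    # convex hull of the input points, so replace those with zeros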
if interpolation_method == 'linear':
inter_data = np.nan_to_num(inter_data)
if unwanted_to_zero:
print('#### Setting all unknown vertex values to zero..')
        # set all vertices that do not belong to a label of interest (as
        # defined by the label dictionaries) to zero
inter_data = set_unwanted_to_zero(temp_vol, inter_data, volume_labels, label_dict_subject_to)
        # do the same for the original data, which is used below for the normalization
data_utz = set_unwanted_to_zero(subj_vol, stc_orig.data, volume_labels, label_dict_subject_from)
stc_orig.data = data_utz
if normalize:
print('\n#### Attempting to normalize the vol-morphed stc..')
normalized_new_data = inter_data.copy()
for p, labels in enumerate(volume_labels):
lab_verts = label_dict_subject_from[labels]
lab_verts_temp = label_dict_subject_to[labels]
# get for the subject brain the indices of all vertices for the given label
subj_vert_idx = []
for i in range(0, lab_verts.shape[0]):
subj_vert_idx.append(np.where(lab_verts[i] == subj_vol[0]['vertno']))
subj_vert_idx = np.asarray(subj_vert_idx)
# get for the template brain the indices of all vertices for the given label
temp_vert_idx = []
for i in range(0, lab_verts_temp.shape[0]):
temp_vert_idx.append(np.where(lab_verts_temp[i] == temp_vol[0]['vertno']))
temp_vert_idx = np.asarray(temp_vert_idx)
# The original implementation by Daniel did not use the absolute
# value for normalization. This is probably because he used MFT
# for the inverse solution which only provides positive activity
# values.
# a = np.sum(stc_orig.data[subj_vert_idx], axis=0)
# b = np.sum(inter_data[temp_vert_idx], axis=0)
# norm_m_score = a / b
# The LCMV beamformer can result in positive as well as negative
# values which can cancel each other out, e.g., after morphing
# there are more vertices in a "negative value area" than before
# resulting in a smaller sum 'b' -> norm_m_score becomes large.
afabs = np.sum(np.fabs(stc_orig.data[subj_vert_idx]), axis=0)
bfabs = np.sum(np.fabs(inter_data[temp_vert_idx]), axis=0)
norm_m_score = afabs / bfabs
normalized_new_data[temp_vert_idx] *= norm_m_score
new_data = normalized_new_data
else:
new_data = inter_data
print(' [done] -> Calculation Time: %.2f minutes.' % (
(time2.time() - st_time) / 60.
))
if save_stc:
print('\n#### Attempting to write interpolated STC Data to file..')
if fname_save_stc is None:
fname_stc_morphed = fname_stc_orig[:-7] + '_morphed_to_%s_%s-vl.stc'
fname_stc_morphed = fname_stc_morphed % (subject_to, interpolation_method)
else:
fname_stc_morphed = fname_save_stc
print(' Destination:', fname_stc_morphed)
_write_stc(fname_stc_morphed, tmin=tmin, tstep=tstep,
vertices=temp_vol[0]['vertno'], data=new_data)
stc_morphed = mne.read_source_estimate(fname_stc_morphed)
if plot:
_volumemorphing_plot_results(stc_orig, stc_morphed,
subj_vol, label_dict_subject_from,
temp_vol, label_dict_subject_to,
volume_labels, subjects_dir=subjects_dir,
cond=cond, run=run, n_iter=n_iter, save=True)
print('#### Volume Morphing ####')
print('#### DONE ####')
return stc_morphed
print('#### Volume morphed stc data NOT saved.. ####\n')
print('#### Volume Morphing ####')
print('#### DONE ####')
return new_data
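# A minimal usage sketch for volume_morph_stc (all file names, labels and the
# condition below are assumptions for illustration only):
#
#     stc_morphed = volume_morph_stc(
#         fname_stc_orig='subject_cond-vl.stc', subject_from='subject',
#         fname_vsrc_subject_from='subject-vol-5.00-src.fif',
#         volume_labels=['Left-Hippocampus'], subject_to='fsaverage',
#         fname_vsrc_subject_to='fsaverage-vol-5.00-src.fif', cond='audi',
#         interpolation_method='linear', normalize=True, subjects_dir=subjects_dir,
#         unwanted_to_zero=True, save_stc=True, plot=False)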
def _volumemorphing_plot_results(stc_orig, stc_morphed,
volume_orig, label_dict_from,
volume_temp, label_dict_to,
volume_labels, subjects_dir,
cond, run=None, n_iter=None,
save=False):
"""
Plot before and after morphing results.
Parameters
----------
stc_orig : VolSourceEstimate
Volume source estimate for the original subject.
stc_morphed : VolSourceEstimate
Volume source estimate for the destination subject.
volume_orig : instance of SourceSpaces
The original source space that were morphed to the current
subject.
label_dict_from : dict
Equivalent label vertex dict to the original source space
volume_temp : instance of SourceSpaces
The template source space that is morphed on.
label_dict_to : dict
Equivalent label vertex dict to the template source space
volume_labels : list of volume Labels
List of the volume labels of interest
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
cond : str
Evoked condition as a string to give the plot more intel.
run : int | None
Specifies the run if multiple measurements for the same condition
were performed.
n_iter : int | None
If MFT was used for the inverse solution, n_iter is the
number of iterations.
Returns
-------
if save == True : None
Automatically creates matplotlib.figure and writes it to disk.
if save == False : returns matplotlib.figure
"""
if run is None:
run_title = ''
run_fname = ''
else:
run_title = ' | Run: %d' % run
run_fname = ',run%d' % run
if n_iter is None:
n_iter_title = ''
n_iter_fname = ''
else:
n_iter_title = ' | Iter.: %d' % n_iter
n_iter_fname = ',iter-%d' % n_iter
subj_vol = volume_orig
subject_from = volume_orig[0]['subject_his_id']
temp_vol = volume_temp
temp_spacing = (abs(temp_vol[0]['rr'][0, 0]
- temp_vol[0]['rr'][1, 0]) * 1000).round()
subject_to = volume_temp[0]['subject_his_id']
label_dict = label_dict_from
label_dict_template = label_dict_to
new_data = stc_morphed.data
indiv_spacing = make_indiv_spacing(subject_from, subject_to,
temp_spacing, subjects_dir)
print('\n#### Attempting to save the volume morphing results ..')
    directory = op.join(subjects_dir, subject, 'plots', 'VolumeMorphing')
if not op.exists(directory):
os.makedirs(directory)
# Create new figure and two subplots, sharing both axes
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True,
num=999, figsize=(16, 9))
fig.text(0.985, 0.75, 'Amplitude [T]', color='white', size='large',
horizontalalignment='right', verticalalignment='center',
rotation=-90, transform=ax1.transAxes)
fig.text(0.985, 0.25, 'Amplitude [T]', color='white', size='large',
horizontalalignment='right', verticalalignment='center',
rotation=-90, transform=ax2.transAxes)
suptitle = 'VolumeMorphing from %s to %s' % (subject_from, subject_to)
suptitle = suptitle + ' | Cond.: %s' % cond
suptitle = suptitle + run_title + n_iter_title
plt.suptitle(suptitle, fontsize=16, color='white')
fig.set_facecolor('black')
plt.tight_layout()
fig.subplots_adjust(bottom=0.04, top=0.94,
left=0.0, right=0.97)
    t = int(np.argmax(np.sum(stc_orig.data, axis=0)))
plot_vstc(vstc=stc_orig, vsrc=volume_orig, tstep=stc_orig.tstep,
subjects_dir=subjects_dir, time_sample=t, coords=None,
figure=999, axes=ax1, save=False)
plot_vstc(vstc=stc_morphed, vsrc=volume_temp, tstep=stc_orig.tstep,
subjects_dir=subjects_dir, time_sample=t, coords=None,
figure=999, axes=ax2, save=False)
if save:
fname_save_fig = '%s_to_%s' + run_fname
fname_save_fig = fname_save_fig + ',vol-%.2f,%s'
fname_save_fig = fname_save_fig % (subject_from, subject_to, indiv_spacing, cond)
fname_save_fig = fname_save_fig + n_iter_fname
fname_save_fig = op.join(directory, fname_save_fig + ',volmorphing-result.png')
plt.savefig(fname_save_fig, facecolor=fig.get_facecolor(),
format='png', edgecolor='none')
plt.close()
else:
plt.show()
print("""\n#### Attempting to compare subjects activity and interpolated
activity in template for all labels..""")
subj_lab_act = {}
temp_lab_act = {}
for label in volume_labels:
lab_arr = label_dict[str(label)]
lab_arr_temp = label_dict_template[str(label)]
subj_vert_idx = np.array([], dtype=int)
temp_vert_idx = np.array([], dtype=int)
for i in range(0, lab_arr.shape[0]):
subj_vert_idx = np.append(subj_vert_idx,
np.where(lab_arr[i]
== subj_vol[0]['vertno']))
for i in range(0, lab_arr_temp.shape[0]):
temp_vert_idx = np.append(temp_vert_idx,
np.where(lab_arr_temp[i]
== temp_vol[0]['vertno']))
lab_act_sum = np.array([])
lab_act_sum_temp = np.array([])
for t in range(0, stc_orig.times.shape[0]):
lab_act_sum = np.append(lab_act_sum,
np.sum(stc_orig.data[subj_vert_idx, t]))
lab_act_sum_temp = np.append(lab_act_sum_temp,
np.sum(stc_morphed.data[temp_vert_idx, t]))
subj_lab_act.update({label: lab_act_sum})
temp_lab_act.update({label: lab_act_sum_temp})
print(' [done]')
# Stc per label
# fig, axs = plt.subplots(len(volume_labels) / 5, 5, figsize=(15, 15),
# facecolor='w', edgecolor='k')
fig, axs = plt.subplots(int(np.ceil(len(volume_labels) / 5.)), 5, figsize=(15, 15),
facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace=.6, wspace=.255,
bottom=0.089, top=.9,
left=0.03, right=0.985)
axs = axs.ravel()
for idx, label in enumerate(volume_labels):
axs[idx].plot(stc_orig.times, subj_lab_act[label], '#00868B',
linewidth=0.9, label=('%s vol-%.2f'
% (subject_from, indiv_spacing)))
axs[idx].plot(stc_orig.times, temp_lab_act[label], '#CD7600', ls=':',
linewidth=0.9, label=('%s volume morphed vol-%.2f'
% (subject_from, temp_spacing)))
axs[idx].set_title(label, fontsize='medium', loc='right')
axs[idx].ticklabel_format(style='sci', axis='both')
axs[idx].set_xlabel('Time [s]')
axs[idx].set_ylabel('Amplitude [T]')
axs[idx].set_xlim(stc_orig.times[0], stc_orig.times[-1])
axs[idx].get_xaxis().grid(True)
suptitle = 'Summed activity in volume labels - %s[%.2f]' % (subject_from, indiv_spacing)
suptitle = suptitle + ' -> %s [%.2f] | Cond.: %s' % (subject_to, temp_spacing, cond)
suptitle = suptitle + run_title + n_iter_title
fig.suptitle(suptitle, fontsize=16)
if save:
fname_save_fig = '%s_to_%s' + run_fname
fname_save_fig = fname_save_fig + ',vol-%.2f,%s'
fname_save_fig = fname_save_fig % (subject_from, subject_to, indiv_spacing, cond)
fname_save_fig = fname_save_fig + n_iter_fname
fname_save_fig = op.join(directory, fname_save_fig + ',labelwise-stc.png')
plt.savefig(fname_save_fig, facecolor=fig.get_facecolor(),
format='png', edgecolor='none')
plt.close()
else:
plt.show()
orig_act_sum = np.sum(stc_orig.data.sum(axis=0))
morphed_act_sum = np.sum(new_data.sum(axis=0))
act_diff_perc = ((morphed_act_sum - orig_act_sum) / orig_act_sum) * 100
act_sum_morphed_normed = np.sum(new_data.sum(axis=0))
act_diff_perc_morphed_normed = ((act_sum_morphed_normed - orig_act_sum)
/ orig_act_sum) * 100
f, (ax1) = plt.subplots(1, figsize=(16, 5))
ax1.plot(stc_orig.times, stc_orig.data.sum(axis=0), '#00868B', linewidth=1,
label='%s' % subject_from)
ax1.plot(stc_orig.times, new_data.sum(axis=0), '#CD7600', linewidth=1,
label='%s morphed' % subject_from)
title = 'Summed Source Amplitude - %s[%.2f] ' % (subject_from, indiv_spacing)
title = title + '-> %s [%.2f] | Cond.: %s' % (subject_to, temp_spacing, cond)
title = title + run_title + n_iter_title
ax1.set_title(title)
ax1.text(stc_orig.times[0],
np.maximum(stc_orig.data.sum(axis=0), new_data.sum(axis=0)).max(),
"""Total Amplitude Difference: %+.2f %%
Total Amplitude Difference (norm): %+.2f %%"""
% (act_diff_perc, act_diff_perc_morphed_normed),
size=12, ha="left", va="top",
bbox=dict(boxstyle="round",
ec="grey",
fc="white",
)
)
ax1.set_ylabel('Summed Source Amplitude')
ax1.legend(fontsize='large', facecolor="white", edgecolor="grey")
ax1.get_xaxis().grid(True)
plt.tight_layout()
if save:
fname_save_fig = '%s_to_%s' + run_fname
fname_save_fig = fname_save_fig + ',vol-%.2f,%s'
fname_save_fig = fname_save_fig % (subject_from, subject_to, indiv_spacing, cond)
fname_save_fig = fname_save_fig + n_iter_fname
fname_save_fig = op.join(directory, fname_save_fig + ',stc.png')
plt.savefig(fname_save_fig, facecolor=fig.get_facecolor(),
format='png', edgecolor='none')
plt.close()
else:
plt.show()
return
def make_indiv_spacing(subject, ave_subject, template_spacing, subjects_dir):
"""
    Determine the grid spacing for a subject's volume source space that
    corresponds to a given grid spacing in the template's volume source
    space, before a planned morphing takes place.
Parameters:
-----------
subject : str
Subject ID.
ave_subject : str
Name or ID of the template brain, e.g., fsaverage.
template_spacing : float
Grid spacing used for the template brain.
subjects_dir : str
Path to the subjects directory.
Returns:
--------
    indiv_spacing : float
        Grid spacing for the subject's volume source space that corresponds
        to the given template spacing.
"""
fname_surf = op.join(subjects_dir, subject, 'bem', 'watershed', '%s_inner_skull_surface' % subject)
fname_surf_temp = op.join(subjects_dir, ave_subject, 'bem', 'watershed', '%s_inner_skull_surface' % ave_subject)
surf = mne.read_surface(fname_surf, return_dict=True, verbose='ERROR')[-1]
surf_temp = mne.read_surface(fname_surf_temp, return_dict=True, verbose='ERROR')[-1]
mins = np.min(surf['rr'], axis=0)
maxs = np.max(surf['rr'], axis=0)
mins_temp = np.min(surf_temp['rr'], axis=0)
maxs_temp = np.max(surf_temp['rr'], axis=0)
# Check which dimension (x,y,z) has greatest difference
diff = (maxs - mins)
diff_temp = (maxs_temp - mins_temp)
# print additional information
# for c, mi, ma, md in zip('xyz', mins, maxs, diff):
# logger.info(' %s = %6.1f ... %6.1f mm --> Difference: %6.1f mm'
# % (c, mi, ma, md))
# for c, mi, ma, md in zip('xyz', mins_temp, maxs_temp, diff_temp):
# logger.info(' %s = %6.1f ... %6.1f mm --> Difference: %6.1f mm'
# % (c, mi, ma, md))
prop = (diff / diff_temp).mean()
indiv_spacing = (prop * template_spacing)
print(" '%s' individual-spacing to '%s'[%.2f] is: %.4fmm" % (
subject, ave_subject, template_spacing, indiv_spacing))
return indiv_spacing
def _remove_vert_duplicates(subject, subj_src, label_dict_subject,
subjects_dir):
"""
    Removes all duplicate vertices from the label-to-vertex dictionary.
    (Duplicates appear because of spatial aliasing in the process of
    creating labelwise volume source spaces in mne-python.)
Parameters:
-----------
subject : str
Subject ID.
subj_src : mne.SourceSpaces
Volume source space for the subject brain.
label_dict_subject : dict
Dictionary with the labels and their respective vertices
for the subject.
subjects_dir : str
Path to the subjects directory.
Returns:
--------
label_dict_subject : dict
Dictionary with the labels and their respective vertices
for the subject where duplicate vertices have been removed.
"""
fname_s_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
mgz = nib.load(fname_s_aseg)
mgz_data = mgz.get_data()
lut = _get_lut()
vox2rastkr_trans = _get_mgz_header(fname_s_aseg)['vox2ras_tkr']
vox2rastkr_trans[:3] /= 1000.
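    # convert the transform output from mm to m so it matches the source
    # space coordinates (which are in m)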
inv_vox2rastkr_trans = linalg.inv(vox2rastkr_trans)
all_volume_labels = mne.get_volume_labels_from_aseg(fname_s_aseg)
all_volume_labels.remove('Unknown')
    print("""\n#### Attempting to check for vertex duplicates in labels due to
spatial aliasing in %s's volume source creation..""" % subject)
del_count = 0
for p, label in enumerate(all_volume_labels):
loadingBar(p, len(all_volume_labels), task_part=None)
lab_arr = label_dict_subject[label]
# get freesurfer LUT ID for the label
lab_id = _get_lut_id(lut, label, True)[0]
del_ver_idx_list = []
for arr_id, i in enumerate(lab_arr, 0):
# get the coordinates of the vertex in subject source space
lab_vert_coord = subj_src[0]['rr'][i]
# transform to mgz indices
lab_vert_mgz_idx = mne.transforms.apply_trans(inv_vox2rastkr_trans, lab_vert_coord)
            # get the ID from the mgz indices
orig_idx = mgz_data[int(round(lab_vert_mgz_idx[0])),
int(round(lab_vert_mgz_idx[1])),
int(round(lab_vert_mgz_idx[2]))]
# if ID and LUT ID do not match the vertex is removed
if orig_idx != lab_id:
del_ver_idx_list.append(arr_id)
del_count += 1
del_ver_idx = np.asarray(del_ver_idx_list)
label_dict_subject[label] = np.delete(label_dict_subject[label], del_ver_idx)
    print('    Deleted', del_count, 'vertex duplicates.\n')
return label_dict_subject
# %% ===========================================================================
# # Statistical Analysis Section
# =============================================================================
def sum_up_vol_cluster(clu, p_thresh=0.05, tstep=1e-3, tmin=0,
subject=None, vertices=None):
"""Assemble summary VolSourceEstimate from spatiotemporal cluster results.
This helps visualizing results from spatio-temporal-clustering
permutation tests.
Parameters
----------
clu : tuple
the output from clustering permutation tests.
p_thresh : float
The significance threshold for inclusion of clusters.
tstep : float
The temporal difference between two time samples.
tmin : float | int
The time of the first sample.
subject : str
The name of the subject.
vertices : list of arrays | None
The vertex numbers associated with the source space locations.
Returns
-------
out : instance of VolSourceEstimate
A summary of the clusters. The first time point in this VolSourceEstimate
object is the summation of all the clusters. Subsequent time points
contain each individual cluster. The magnitude of the activity
corresponds to the length the cluster spans in time (in samples).
"""
T_obs, clusters, clu_pvals, _ = clu
n_times, n_vertices = T_obs.shape
good_cluster_inds = np.where(clu_pvals < p_thresh)[0]
# Build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the VolSourceEstimate
if len(good_cluster_inds) > 0:
data = np.zeros((n_vertices, n_times))
data_summary = np.zeros((n_vertices, len(good_cluster_inds) + 1))
print('Data_summary is in shape of:', data_summary.shape)
for ii, cluster_ind in enumerate(good_cluster_inds):
loadingBar(ii + 1, len(good_cluster_inds), task_part='Cluster Idx %i' % cluster_ind)
data.fill(0)
v_inds = clusters[cluster_ind][1]
t_inds = clusters[cluster_ind][0]
data[v_inds, t_inds] = T_obs[t_inds, v_inds]
# Store a nice visualization of the cluster by summing across time
data = np.sign(data) * np.logical_not(data == 0) * tstep
data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)
# Make the first "time point" a sum across all clusters for easy
# visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)
return VolSourceEstimate(data_summary, vertices, tmin=tmin, tstep=tstep,
subject=subject)
else:
raise RuntimeError('No significant clusters available. Please adjust '
'your threshold or check your statistical '
'analysis.')
def plot_T_obs(T_obs, threshold, tail, save, fname_save):
    """ Plot the distribution of the T statistics as a histogram. """
# T_obs plot code
T_obs_flat = T_obs.flatten()
plt.figure('T-Statistics', figsize=(8, 8))
T_max = T_obs.max()
T_min = T_obs.min()
T_mean = T_obs.mean()
str_tail = 'one tail'
    if tail == 0 or tail is None:
plt.xlim([-20, 20])
str_tail = 'two tail'
    elif tail == -1:
plt.xlim([-20, 0])
else:
plt.xlim([0, T_obs_flat.max() * 1.05])
y, bin_edges = np.histogram(T_obs_flat,
range=(0, T_obs_flat.max()),
bins=500)
bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
if threshold is not None:
plt.plot([threshold, threshold], (0, y[bin_centers >= 0.].max()), color='#CD7600',
linestyle=':', linewidth=2)
legend = ('T-Statistics:\n'
' Mean: %.2f\n'
' Minimum: %.2f\n'
' Maximum: %.2f\n'
' Threshold: %.2f \n'
' ') % (T_mean, T_min, T_max, threshold)
plt.ylim(None, y[bin_centers >= 0.].max() * 1.1)
plt.xlabel('T-scores', fontsize=12)
plt.ylabel('T-values count', fontsize=12)
plt.title('T statistics distribution of t-test - %s' % str_tail, fontsize=15)
plt.plot(bin_centers, y, label=legend, color='#00868B')
# plt.xlim([])
plt.tight_layout()
legend = plt.legend(loc='upper right', shadow=True, fontsize='large', frameon=True)
if save:
plt.savefig(fname_save)
plt.close()
return
def plot_T_obs_3D(T_obs, save, fname_save):
    """ Plot the T statistics as a 3D surface over vertices and time. """
from matplotlib import cm as cm_mpl
fig = plt.figure(facecolor='w', figsize=(8, 8))
ax = fig.gca(projection='3d')
vertc, timez = np.mgrid[0:T_obs.shape[0], 0:T_obs.shape[1]]
Ts = T_obs
title = 'T Obs'
t_obs_stats = ax.plot_surface(vertc, timez, Ts, cmap=cm_mpl.hot) # , **kwargs)
# plt.set_xticks([])
# plt.set_yticks([])
ax.set_xlabel('times [ms]')
    ax.set_ylabel('Vertex No')
ax.set_zlabel('Statistical Amplitude')
ax.w_zaxis.set_major_locator(LinearLocator(6))
ax.set_zlim(0, np.max(T_obs))
ax.set_title(title)
fig.colorbar(t_obs_stats, shrink=0.5)
plt.tight_layout()
plt.show()
if save:
plt.savefig(fname_save)
plt.close()
return
| bsd-3-clause |
NorfolkDataSci/presentations | 2018-01_chatbot/serverless-chatbots-workshop-master/LambdaFunctions/sentiment-analysis/nltk/draw/dispersion.py | 7 | 1744 | # Natural Language Toolkit: Dispersion Plots
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A utility for displaying lexical dispersion.
"""
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
"""
Generate a lexical dispersion plot.
:param text: The source text
:type text: list(str) or enum(str)
:param words: The target words
:type words: list of str
:param ignore_case: flag to set if case should be ignored when searching text
:type ignore_case: bool
"""
try:
from matplotlib import pylab
except ImportError:
        raise ValueError('The plot function requires matplotlib to be installed. '
                         'See http://matplotlib.org/')
text = list(text)
words.reverse()
if ignore_case:
words_to_comp = list(map(str.lower, words))
text_to_comp = list(map(str.lower, text))
else:
words_to_comp = words
text_to_comp = text
points = [(x,y) for x in range(len(text_to_comp))
for y in range(len(words_to_comp))
if text_to_comp[x] == words_to_comp[y]]
if points:
x, y = list(zip(*points))
else:
x = y = ()
pylab.plot(x, y, "b|", scalex=.1)
pylab.yticks(list(range(len(words))), words, color="b")
pylab.ylim(-1, len(words))
pylab.title(title)
pylab.xlabel("Word Offset")
pylab.show()
if __name__ == '__main__':
import nltk.compat
from nltk.corpus import gutenberg
words = ['Elinor', 'Marianne', 'Edward', 'Willoughby']
dispersion_plot(gutenberg.words('austen-sense.txt'), words)
| mit |
caganze/wisps | wisps/simulations/sample_distances.py | 1 | 3737 | import numpy as np
from astropy.coordinates import SkyCoord
#from multiprocessing import Pool
from pathos.multiprocessing import ProcessingPool as Pool
from scipy.interpolate import interp1d
import scipy.integrate as integrate
import wisps
import pandas as pd
import wisps.simulations as wispsim
import pickle
from tqdm import tqdm
#contants
POINTINGS= pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl')
DISTANCE_LIMITS=[]
SPGRID=wispsim.SPGRID
Rsun=8300.
Zsun=27.
HS=[100, 150,200,250, 300,350, 400, 450, 500, 600, 700, 800, 1000]
dist_arrays=pd.DataFrame.from_records([x.dist_limits for x in POINTINGS]).applymap(lambda x:np.vstack(x).astype(float))
DISTANCE_LIMITS={}
for s in SPGRID:
DISTANCE_LIMITS[s]=dist_arrays[s].mean(axis=0)
#redefine magnitude limits by taking into account the scatter for each pointing
#use these to compute volumes
#REDEFINED_MAG_LIMITS={'F110': 23.054573, 'F140': 23.822972, 'F160' : 23.367867}
#-------------------------------------------
def density_function(r, z, h=300.):
    """
    A custom Juric density function that only uses numpy arrays for speed.
    All units are in pc.
    """
l = 2600. # radial length scale of exponential thin disk
zpart=np.exp(-abs(z-Zsun)/h)
rpart=np.exp(-(r-Rsun)/l)
return zpart*rpart
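# Sanity check (sketch): at the solar position (r=Rsun, z=Zsun) both exponents
# are zero, so density_function(Rsun, Zsun, h) == 1.0 for any scale height h.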
def custom_volume(l,b,dmin, dmax, h):
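    # numerically integrate rho(r, z) * d**2 along the line of sight (l, b)
    # between dmin and dmax using the trapezoidal rule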
nsamp=10000
ds = np.linspace(dmin,dmax,nsamp)
rd=np.sqrt( (ds * np.cos( b ) )**2 + Rsun * (Rsun - 2 * ds * np.cos( b ) * np.cos( l ) ) )
zd=Zsun+ ds * np.sin( b - np.arctan( Zsun / Rsun) )
rh0=density_function(rd, zd,h=h )
val=integrate.trapz(rh0*(ds**2), x=ds)
return val
def interpolated_cdf(pnt, h):
l, b= pnt.coord.galactic.l.radian, pnt.coord.galactic.b.radian
d=np.concatenate([[0], np.logspace(-1, 4, int(1e3))])
#print (d)
cdfvals=np.array([custom_volume(l,b,0, dx, h) for dx in d])
cdfvals= cdfvals/np.nanmax(cdfvals)
return interp1d(d, cdfvals)
def draw_distance_with_cdf(pntname, dmin, dmax, nsample, h, interpolated_cdfs):
#draw distances using inversion of the cumulative distribution
d=np.logspace(np.log10(dmin), np.log10(dmax), int(nsample))
#print (d, dmin, dmax)
cdfvals=(interpolated_cdfs[pntname])(d)
return wisps.random_draw(d, cdfvals/np.nanmax(cdfvals), int(nsample))
def load_interpolated_cdfs(h, recompute=False):
if recompute:
small_inter={}
for p in tqdm(POINTINGS):
small_inter.update({p.name: interpolated_cdf(p, h)})
fl=wisps.OUTPUT_FILES+'/distance_sample_interpolations{}'.format(h)
with open(fl, 'wb') as file: pickle.dump(small_inter,file, protocol=pickle.HIGHEST_PROTOCOL)
return
else:
return pd.read_pickle(wisps.OUTPUT_FILES+'/distance_sample_interpolations{}'.format(h))
def parallel_sample(recompute=False):
#INTERPOLATED_CDFS= {}
#for h in HS:
# small_inter={}
# for p in POINTINGS:
# small_inter.update({p.name: interpolated_cdf(p, h)})
# INTERPOLATED_CDFS.update({h: small_inter })
DISTANCE_SAMPLES={}
PNTAMES=[x.name for x in POINTINGS]
dis={}
for h in HS:
for s in tqdm(DISTANCE_LIMITS.keys()):
cdf=load_interpolated_cdfs(h, recompute=recompute)
dlts=np.array(DISTANCE_LIMITS[s]).flatten()
fx= lambda x: draw_distance_with_cdf(x, 1., 2*dlts[0], int(5e4), h, cdf)
with Pool() as pool:
dx=pool.map(fx, PNTAMES)
dis.update({s: dx})
del dx
DISTANCE_SAMPLES.update({h: dis})
fl=wisps.OUTPUT_FILES+'/distance_samples{}'.format(h)
with open(fl, 'wb') as file: pickle.dump(DISTANCE_SAMPLES[h],file, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
#for h in HS:
# _=load_interpolated_cdfs(h, recompute=True)
    parallel_sample(recompute=False)
| mit |
wazeerzulfikar/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 23 | 3376 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn.linear_model import Ridge
from sklearn import datasets
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_bayesian_ridge_parameter():
# Test correctness of lambda_ and alpha_ parameters (Github issue #8224)
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
# A Ridge regression model using an alpha value equal to the ratio of
# lambda_ and alpha_ from the Bayesian Ridge model must be identical
br_model = BayesianRidge(compute_score=True).fit(X, y)
rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y)
assert_array_almost_equal(rr_model.coef_, br_model.coef_)
assert_almost_equal(rr_model.intercept_, br_model.intercept_)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_return_std():
# Test return_std option for both Bayesian regressors
def f(X):
return np.dot(X, w) + b
def f_noise(X, noise_mult):
return f(X) + np.random.randn(X.shape[0]) * noise_mult
d = 5
n_train = 50
n_test = 10
w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
b = 1.0
X = np.random.random((n_train, d))
X_test = np.random.random((n_test, d))
for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
y = f_noise(X, noise_mult)
m1 = BayesianRidge()
m1.fit(X, y)
y_mean1, y_std1 = m1.predict(X_test, return_std=True)
assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
m2 = ARDRegression()
m2.fit(X, y)
y_mean2, y_std2 = m2.predict(X_test, return_std=True)
assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
| bsd-3-clause |
tedunderwood/horizon | chapter2/surprise/create_surprise_metric.py | 1 | 2323 | #!/usr/bin/env python3
# create_surprise_metric.py
import sys, os, csv, random
import numpy as np
import pandas as pd
date = int(sys.argv[1])
def addtodict(adict, afilename):
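    # accumulate, for every alphabetic word, the scaled coefficients read
    # from one model's .coefs.csv file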
path = '../modeloutput/' + afilename + '.coefs.csv'
with open(path, encoding = 'utf-8') as f:
reader = csv.reader(f)
for row in reader:
word = row[0]
if len(word) < 1 or not word[0].isalpha():
continue
# we're just using alphabetic words for this
coef = float(row[2])
# note that a great deal depends on the difference between
# row[1] (the unadjusted coefficient) and row[2] (the
# coefficient divided by the .variance of the scaler for
# this word, aka "how much a single instance of the word
# moves the needle.")
if word in adict:
adict[word].append(coef)
else:
adict[word] = [coef]
return adict
periods = [(1870, 1899), (1900, 1929), (1930, 1959), (1960, 1989), (1990, 2010), (1880, 1909), (1910, 1939), (1940, 1969), (1970, 1999), (1890, 1919), (1920, 1949), (1950, 1979), (1980, 2009)]
# identify the periods at issue
for floor, ceiling in periods:
if ceiling+ 1 == date:
f1, c1 = floor, ceiling
if floor == date:
f2, c2 = floor, ceiling
new = dict()
old = dict()
for i in range(5):
for j in range(5):
for part in [1, 2]:
name1 = 'rccsf'+ str(f1) + '_' + str(c1) + '_' + str(i) + '_' + str(part)
name2 = 'rccsf'+ str(f2) + '_' + str(c2) + '_' + str(j) + '_' + str(part)
addtodict(old, name1)
addtodict(new, name2)
allwords = set([x for x in old.keys()]).union(set([x for x in new.keys()]))
with open('crudemetrics/surprise_in_' + str(date) + '.csv', mode = 'w', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = ['word', 'coef'])
scribe.writeheader()
for w in allwords:
if w in old:
oldcoef = sum(old[w]) / len(old[w])
else:
oldcoef = 0
if w in new:
newcoef = sum(new[w]) / len(new[w])
else:
newcoef = 0
o = dict()
o['word'] = w
o['coef'] = (newcoef - oldcoef) / 1000000
scribe.writerow(o)
| mit |
Myasuka/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print( model_details + " with features", pair, "has a score of", scores )
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly space and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
ultracoldYEG/cycle-control | CycleControl/cycle_plotter.py | 1 | 6041 | from PyQt5 import QtCore
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
from CycleControl.objects.cycle import *
def prepare_sample_plot_data(domain, data):
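    # build zero-order-hold (step plot) samples: each value is repeated at the
    # next domain point so the plotted line holds constant between samples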
new_domain = []
new_data = []
for i in range(1, len(domain)):
new_domain.append(domain[i - 1])
new_data.append(data[i - 1])
new_domain.append(domain[i])
new_data.append(data[i - 1])
new_domain.append(domain[-1])
new_data.append(data[-1])
return new_domain, new_data
class CyclePlotter(object):
def __init__(self, gui):
self.gui = gui
self.controller = gui.controller
self.fig = plt.Figure()
self.ax = self.fig.add_subplot(111)
self.step = 1
self.canvas = FigureCanvas(self.fig)
self.gui.plot_layout.addWidget(self.canvas)
self.toolbar = NavigationToolbar(self.canvas, self.gui.widget, coordinates=True)
self.gui.plot_layout.addWidget(self.toolbar)
self.toolbar.setFixedHeight(24)
self.canvas.draw()
self.cycle = None
self.gui.digital_channel_combo = CheckableComboBox()
self.gui.analog_channel_combo = CheckableComboBox()
self.gui.novatech_channel_combo = CheckableComboBox()
self.gui.gridLayout_10.addWidget(self.gui.digital_channel_combo, 2, 0)
self.gui.gridLayout_10.addWidget(self.gui.analog_channel_combo, 2, 1)
self.gui.gridLayout_10.addWidget(self.gui.novatech_channel_combo, 2, 2)
self.gui.plot_button.clicked.connect(self.update_data)
self.gui.cycle_plot_number.valueChanged.connect(self.update_step)
self.update_channels()
def update_channels(self):
self.gui.digital_channel_combo.clear()
self.gui.analog_channel_combo.clear()
self.gui.novatech_channel_combo.clear()
for board in self.controller.hardware.pulseblasters:
for i, channel in enumerate(board.channels):
if channel.enabled:
label = 'Board {} - {} {}'.format(board.id, i, channel.label)
self.add_checkable_combo_item(self.gui.digital_channel_combo, label, board.id, i)
for board in self.controller.hardware.ni_boards:
for i, channel in enumerate(board.channels):
if channel.enabled:
label = '{} - {} {}'.format(board.id, i, channel.label)
self.add_checkable_combo_item(self.gui.analog_channel_combo, label, board.id, i)
for board in self.controller.hardware.novatechs:
for i, channel in enumerate(board.channels):
if channel.enabled:
for j, param in enumerate(['Amp', 'Freq', 'Phase']):
label = '{} - {} {} ({})'.format(board.id, i, channel.label, param)
self.add_checkable_combo_item(self.gui.novatech_channel_combo, label, board.id, 3*i + j)
def update_step(self, val):
self.step = val
self.update_data()
def update_data(self):
if not len(self.controller.proc_params.instructions):
print('Put in more instructions')
return
self.fig.clf()
self.ax = self.fig.add_subplot(111)
self.ax.grid()
self.cycle = Cycle(self.controller.proc_params.instructions, self.controller.proc_params.get_cycle_variables(self.step - 1))
self.cycle.create_waveforms()
self.plot_digital_channels()
self.plot_analog_channels()
self.plot_novatech_channels()
self.canvas.draw()
def add_checkable_combo_item(self, combo, name, board_id, channel_num):
combo.addItem(str(name))
item = combo.model().item(combo.count()-1, 0)
item.setCheckState(QtCore.Qt.Unchecked)
item.appendColumn([QStandardItem('test!')])
item.setData((board_id, channel_num))
def plot_analog_channels(self):
for i in range(self.gui.analog_channel_combo.count()):
item = self.gui.analog_channel_combo.model().item(i, 0)
if item.checkState():
analog_domain = self.cycle.analog_domain
analog_data = self.cycle.analog_data.get(item.data()[0])[item.data()[1]]
x, y = prepare_sample_plot_data(analog_domain, analog_data)
self.ax.plot(x, y, marker='o', markersize=2)
def plot_novatech_channels(self):
for i in range(self.gui.novatech_channel_combo.count()):
item = self.gui.novatech_channel_combo.model().item(i, 0)
if item.checkState():
novatech_domain = self.cycle.novatech_domain
novatech_data = self.cycle.novatech_data.get(item.data()[0])[item.data()[1]]
x, y = prepare_sample_plot_data(novatech_domain, novatech_data)
self.ax.plot(x, y, marker='o', markersize=2)
def plot_digital_channels(self):
for i in range(self.gui.digital_channel_combo.count()):
item = self.gui.digital_channel_combo.model().item(i, 0)
if item.checkState():
digital_domain = self.cycle.digital_domain
board_data = self.cycle.digital_data.get(item.data()[0])
digital_data = [int(x[item.data()[1]]) for x in board_data]
x, y = prepare_sample_plot_data(digital_domain, digital_data)
self.ax.plot(x, y, marker='o', markersize=2)
class CheckableComboBox(QComboBox):
def __init__(self):
super(CheckableComboBox, self).__init__()
self.view().pressed.connect(self.handleItemPressed)
self.setModel(QStandardItemModel(self))
def handleItemPressed(self, index):
item = self.model().itemFromIndex(index)
if item.checkState() == QtCore.Qt.Checked:
item.setCheckState(QtCore.Qt.Unchecked)
else:
item.setCheckState(QtCore.Qt.Checked) | mit |
DStauffman/dstauffman2 | dstauffman2/games/fiver.py | 1 | 25942 | """
The "fiver" file solves the geometric puzzle with twelve pieces made of all unique combinations of
five squares that share adjacent edges. Then these 12 pieces are laid out into boards of sixty
possible places in different orientations. I'm unaware of a generic name for this game.
Notes
-----
#. Written by David C. Stauffer in October 2015 when he found the puzzle on his dresser while
acquiring and rearranging some furniture.
"""
#%% Imports
import doctest
import os
import pickle
import unittest
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle
from dstauffman import setup_dir
from dstauffman.plotting import ColorMap, Opts, setup_plots
from dstauffman2 import get_root_dir
#%% Hard-coded values
SIZE_PIECES = 5
NUM_PIECES = 12
NUM_ORIENTS = 8
# build colormap
cm = ColorMap('Paired', 0, NUM_PIECES-1)
COLORS = ['w'] + [cm.get_color(i) for i in range(NUM_PIECES)] + ['k']
# make boards
BOARD1 = np.full((14, 18), NUM_PIECES+1, dtype=int)
BOARD1[4:10,4:14] = 0
BOARD2 = np.full((16, 16), NUM_PIECES+1, dtype=int)
BOARD2[4:12, 4:12] = 0
BOARD2[7:9, 7:9] = NUM_PIECES+1
#%% Functions - _pad_piece
def _pad_piece(piece, max_size, pad_value=0):
r"""
Pads a piece to a given size.
Parameters
----------
piece : 2D ndarray
Piece or board to pad
max_size : int, or 2 element list of int
        Maximum size of each axis, ordered as (rows, columns) when given as a list
pad_value : int, optional, default is 0
Value to use when padding
Returns
-------
new_piece : 2D ndarray of int
Padded piece or board
Notes
-----
#. Written by David C. Stauffer in October 2015.
Examples
--------
>>> from dstauffman2.games.fiver import _pad_piece
>>> import numpy as np
>>> piece = np.array([[1, 1, 1, 1], [0, 0, 0, 1]], dtype=int)
>>> max_size = 5
>>> new_piece = _pad_piece(piece, max_size)
>>> print(new_piece)
[[1 1 1 1 0]
[0 0 0 1 0]
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]]
"""
# determine if max_size is a scalar or specified per axis
if np.isscalar(max_size):
max_size = [max_size, max_size]
# get the current size
(i, j) = piece.shape
# initialize the output
new_piece = piece.copy()
# pad the horizontal direction
    if j < max_size[1]:
new_piece = np.hstack((new_piece, np.full((i, max_size[1]-j), pad_value, dtype=int)))
# pad the vertical direction
    if i < max_size[0]:
new_piece = np.vstack((new_piece, np.full((max_size[0]-i, max_size[1]), pad_value, dtype=int)))
# return the resulting piece
return new_piece
#%% Functions - _shift_piece
def _shift_piece(piece):
r"""
Shifts a piece to the most upper left location within an array.
Parameters
----------
piece : 2D ndarray of int
Piece
Returns
-------
new_piece : 2D ndarray of int
Shifted piece
Notes
-----
#. Written by David C. Stauffer in October 2015.
Examples
--------
>>> from dstauffman2.games.fiver import _shift_piece
>>> import numpy as np
>>> x = np.zeros((5,5), dtype=int)
>>> x[1, :] = 1
>>> y = _shift_piece(x)
>>> print(y)
[[1 1 1 1 1]
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]]
"""
new_piece = piece.copy()
ix = [1, 2, 3, 4, 0]
while np.all(new_piece[0, :] == 0):
new_piece = new_piece[ix, :]
while np.all(new_piece[:, 0] == 0):
new_piece = new_piece[:, ix]
return new_piece
#%% Functions - _rotate_piece
def _rotate_piece(piece):
r"""
Rotates a piece 90 degrees to the left.
Parameters
----------
piece : 2D ndarray of int
Piece
Returns
-------
new_piece : 2D ndarray of int
Rotated piece
Notes
-----
#. Written by David C. Stauffer in October 2015.
Examples
--------
>>> from dstauffman2.games.fiver import _rotate_piece
>>> import numpy as np
>>> x = np.arange(25).reshape((5, 5))
>>> y = _rotate_piece(x)
>>> print(y)
[[ 4 9 14 19 24]
[ 3 8 13 18 23]
[ 2 7 12 17 22]
[ 1 6 11 16 21]
[ 0 5 10 15 20]]
"""
# rotate the piece
temp_piece = np.rot90(piece)
# shift to upper left most position
new_piece = _shift_piece(temp_piece)
return new_piece
#%% Functions - _flip_piece
def _flip_piece(piece):
r"""
Flips a piece about the horizontal axis.
Parameters
----------
piece : 2D ndarray of int
Piece
Returns
-------
new_piece : 2D ndarray of int
Shifted piece
Notes
-----
#. Written by David C. Stauffer in October 2015.
Examples
--------
>>> from dstauffman2.games.fiver import _flip_piece
>>> import numpy as np
>>> x = np.arange(25).reshape((5, 5))
>>> y = _flip_piece(x)
>>> print(y)
[[20 21 22 23 24]
[15 16 17 18 19]
[10 11 12 13 14]
[ 5 6 7 8 9]
[ 0 1 2 3 4]]
"""
# flip and shift to upper left most position
return _shift_piece(np.flipud(piece))
#%% Functions - _get_unique_pieces
def _get_unique_pieces(pieces):
r"""
Returns the indices to the first dimension for the unique pieces.
Parameters
----------
pieces : 3D ndarray of int
3D array of pieces
Returns
-------
ix_unique : ndarray of int
Unique indices
Notes
-----
#. Written by David C. Stauffer in October 2015.
Examples
--------
>>> from dstauffman2.games.fiver import _get_unique_pieces, _rotate_piece
>>> import numpy as np
>>> pieces = np.zeros((3, 5, 5), dtype=int)
>>> pieces[0, :, 0] = 1
>>> pieces[1, :, 1] = 1
>>> pieces[2, :, 0] = 1
>>> ix_unique = _get_unique_pieces(pieces)
>>> print(ix_unique)
[0, 1]
"""
# find the number of pieces
num = pieces.shape[0]
# initialize some lists
ix_unique = []
sets = []
# loop through pieces
for ix in range(num):
# alias this piece
this_piece = pieces[ix]
# find the indices in the single vector version of the array and convert to a unique set
inds = set(np.flatnonzero(this_piece.ravel()))
# see if this set is in the master set
if inds not in sets:
# if not, then this is a new unique piece, so keep it
ix_unique.append(ix)
sets.append(inds)
return ix_unique
#%% Functions - _display_progress
def _display_progress(ix, nums, last_ratio=0):
r"""
Displays the total progress to the command window.
Parameters
----------
ix : 1D ndarray of int
Index into which pieces are being evaluated
nums : 1D ndarray of int
        Possible permutations for each individual piece
    last_ratio : float, optional
        Completion ratio the last time progress was printed; used to throttle the output
Returns
-------
ratio : float
        Fraction of the possible permutations that have been evaluated so far
Notes
-----
#. Written by David C. Stauffer in November 2015.
Examples
--------
>>> from dstauffman2.games.fiver import _display_progress
>>> import numpy as np
>>> ix = np.array([1, 0, 4, 0])
>>> nums = np.array([2, 4, 8, 16])
>>> ratio = _display_progress(ix, nums) # ratio = (512+64)/1024
    Progress: 56.2%
"""
# determine the number of permutations in each piece level
    complete = np.flipud(np.cumprod(np.flipud(nums.astype(float))))
# count how many branches have been evaluated
done = 0
for i in range(len(ix)):
done = done + ix[i]*complete[i]/nums[i]
# determine the completion ratio
ratio = done / complete[0]
# print the status
if np.round(1000*ratio) > np.round(1000*last_ratio):
        print('Progress: {:.1f}%'.format(ratio*100))
return ratio
else:
return last_ratio
#%% Functions - _blobbing
def _blobbing(board):
r"""Blobbing algorithm 2. Checks that all empty blobs are multiples of 5 squares."""
# set sizes
(m, n) = board.shape
# initialize some lists, labels and counter
linked = []
labels = np.zeros((m, n), dtype=int)
counter = 0
# first pass
for i in range(m):
for j in range(n):
# see if this is an empty piece
if board[i, j]:
# get the north and west neighbors
if i > 0:
north = labels[i-1, j]
else:
north = 0
if j > 0:
west = labels[i, j-1]
else:
west = 0
# check one of four conditions
if north > 0:
if west > 0:
# both neighbors are valid
if north == west:
# simple case with same labels
labels[i, j] = north
else:
# neighbors have different labels, so combine the sets
min_label = min(north, west)
labels[i, j] = min_label
linked[north-1] = linked[north-1] | {west}
linked[west-1] = linked[west-1] | {north}
else:
# join with north neighbor
labels[i, j] = north
else:
if west > 0:
# join with west neighbor
labels[i, j] = west
else:
# not part of a previous blob, so start a new one
counter += 1
labels[i, j] = counter
linked.append({counter})
# second pass
for i in range(m):
for j in range(n):
if board[i, j]:
labels[i, j] = min(linked[labels[i, j] - 1])
# check for valid blob sizes
for s in range(np.max(labels)):
size = np.count_nonzero(labels == s + 1)
if np.mod(size, SIZE_PIECES) != 0:
return False
return True
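#%% Functions - _example_blobbing
# Editor's sketch (not part of the original module): _blobbing expects a boolean
# mask in which True marks an *empty* square, and returns True only when every
# connected blob of empty squares has a size that is a multiple of SIZE_PIECES.
def _example_blobbing():
    r"""Returns (True, False) for a fillable and an unfillable empty region."""
    good = np.zeros((5, 5), dtype=bool)
    good[0, :] = True         # a single blob of exactly 5 empty squares
    bad = np.zeros((5, 5), dtype=bool)
    bad[0, 0:3] = True        # a blob of 3 empty squares can never hold a piece
    return (_blobbing(good), _blobbing(bad))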
#%% Functions - _save_solution
def _save_solution(solutions, this_board):
r"""Saves the given solution if it's unique."""
if len(solutions) == 0:
# if this is the first solution, then simply save it
solutions.append(this_board.copy())
else:
# determine if unique
temp = this_board.copy()
(m, n) = temp.shape
rots = NUM_ORIENTS//2
for i in range(rots):
temp = _rotate_piece(temp)
if temp.shape[0] != m or temp.shape[1] != n:
continue
for j in range(len(solutions)):
if not np.any(solutions[j] - temp):
return
temp = _flip_piece(temp)
for i in range(rots):
temp = _rotate_piece(temp)
if temp.shape[0] != m or temp.shape[1] != n:
continue
for j in range(len(solutions)):
if not np.any(solutions[j] - temp):
return
solutions.append(this_board.copy())
print('Solution {} found!'.format(len(solutions)))
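#%% Functions - _example_save_solution
# Editor's sketch (not part of the original module): a candidate solution that is
# only a rotation (or flip) of one already stored is treated as a duplicate and
# discarded.  The 3x3 array below is an arbitrary stand-in for a solved board.
def _example_save_solution():
    r"""Returns the number of stored solutions, expected to be 1."""
    solutions = []
    board = np.arange(1, 10).reshape((3, 3))
    _save_solution(solutions, board)              # the first solution is always kept
    _save_solution(solutions, np.rot90(board))    # a rotated copy is rejected
    return len(solutions)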
#%% Functions - make_all_pieces
def make_all_pieces():
r"""
Makes all the possible pieces of the game.
Returns
-------
pieces : 3D ndarray of int
3D array of pieces
Notes
-----
#. Written by David C. Stauffer in October 2015.
Examples
--------
>>> from dstauffman2.games.fiver import make_all_pieces
>>> pieces = make_all_pieces()
>>> print(pieces[0])
[[1 1 1 1 1]
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]]
"""
# Hard-coded values
p1 = np.array([[1, 1, 1, 1, 1]])
p2 = np.array([\
[1, 1, 1, 1], \
[0, 0, 0, 1]])
p3 = np.array([\
[1, 1, 1, 1], \
[0, 0, 1, 0]])
p4 = np.array([\
[1, 1, 1, 0], \
[0, 0, 1, 1]])
p5 = np.array([\
[1, 1, 1], \
[1, 0, 0], \
[1, 0, 0]])
p6 = np.array([\
[1, 1, 1], \
[0, 1, 0], \
[0, 1, 0]])
p7 = np.array([\
[0, 1, 0], \
[1, 1, 1], \
[0, 1, 0]])
p8 = np.array([\
[1, 1, 1], \
[1, 1, 0]])
p9 = np.array([\
[1, 1, 0], \
[0, 1, 0], \
[0, 1, 1]])
p10 = np.array([\
[1, 1], \
[1, 0], \
[1, 1]])
p11 = np.array([\
[1, 1, 0], \
[0, 1, 1], \
[0, 1, 0]])
p12 = np.array([\
[0, 1, 1], \
[1, 1, 0], \
[1, 0, 0]])
# preallocate output
    pieces = np.full((NUM_PIECES, SIZE_PIECES, SIZE_PIECES), -1, dtype=int)
for (ix, this_piece) in enumerate([p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12]):
# pad each piece
new_piece = _pad_piece(this_piece, SIZE_PIECES)
# save this piece with an appropriate numerical value
pieces[ix] = (ix + 1) * new_piece
return pieces
#%% Functions - make_all_permutations
def make_all_permutations(pieces):
r"""
Makes all the possible permutations of every possible piece.
Parameters
----------
pieces : 3D ndarray of int
3D array of pieces
Returns
-------
all_pieces : list of 3D ndarray of int
List of all 3D array of piece permutations
Notes
-----
#. Written by David C. Stauffer in October 2015.
Examples
--------
>>> from dstauffman2.games.fiver import make_all_pieces, make_all_permutations
>>> pieces = make_all_pieces()
>>> all_pieces = make_all_permutations(pieces)
"""
# initialize the output
all_pieces = []
# loop through all the pieces
for ix in range(NUM_PIECES):
# preallocate the array
all_this_piece = np.full((NUM_ORIENTS, SIZE_PIECES, SIZE_PIECES), -1, dtype=int)
# alias this piece
this_piece = pieces[ix]
# find the number of rotations (4)
rots = NUM_ORIENTS//2
# do the rotations and keep each piece
for counter in range(rots):
this_piece = _rotate_piece(this_piece)
all_this_piece[counter] = this_piece
# flip the piece
this_piece = _flip_piece(this_piece)
# do another set of rotations and keep each piece
for counter in range(rots):
this_piece = _rotate_piece(this_piece)
all_this_piece[counter+rots] = this_piece
# find the indices to the unique pieces
ix_unique = _get_unique_pieces(all_this_piece)
# gather the unique combinations
all_pieces.append(all_this_piece[ix_unique])
return all_pieces
#%% Functions - is_valid
def is_valid(board, piece, use_blobbing=True):
r"""
Determines if the piece is valid for the given board.
Parameters
----------
board : 2D ndarray
Board
piece : 2D or 3D ndarray
Piece
use_blobbing : bool, optional
Whether to look for continuous blobs that show the board will not work
Returns
-------
out : ndarray of bool
True/False flags for whether the pieces were valid.
Notes
-----
#. Written by David C. Stauffer in November 2015.
Examples
--------
>>> from dstauffman2.games.fiver import is_valid
>>> import numpy as np
>>> board = np.ones((5, 5), dtype=int)
>>> board[1:-1, 1:-1] = 0
>>> piece = np.zeros((5,5), dtype=int)
>>> piece[1, 1:4] = 2
>>> piece[2, 2] = 2
>>> out = is_valid(board, piece)
>>> print(out)
True
"""
# check if only one piece
if piece.ndim == 2:
# do simple test first
out = np.logical_not(np.any(board * piece))
# see if blobbing
if out and use_blobbing:
out = _blobbing((board + piece) == 0)
elif piece.ndim == 3:
# do multiple pieces
temp = np.expand_dims(board, axis=0) * piece
out = np.logical_not(np.any(np.any(temp, axis=2), axis=1))
if np.any(out) and use_blobbing:
for k in range(piece.shape[0]):
if out[k]:
out[k] = _blobbing((board + piece[k]) == 0)
else:
raise ValueError('Unexpected number of dimensions for piece = "{}"'.format(piece.ndim))
return out
#%% Functions - find_all_valid_locations
def find_all_valid_locations(board, all_pieces):
r"""Finds all the valid locations for each piece on the board."""
(m, n) = board.shape
max_pieces = (m - SIZE_PIECES - 1)*(n - SIZE_PIECES - 1) * NUM_ORIENTS
locations = []
for these_pieces in all_pieces:
# over-allocate a possible array
these_locs = np.zeros((max_pieces, m, n), dtype=int)
counter = 0
for ix in range(these_pieces.shape[0]):
start_piece = _pad_piece(these_pieces[ix,:,:], board.shape)
for i in range(m - SIZE_PIECES + 1):
this_piece = np.roll(start_piece, i, axis=0)
if is_valid(board, this_piece):
these_locs[counter] = this_piece
counter += 1
for j in range(1, n - SIZE_PIECES + 1):
this_piece2 = np.roll(this_piece, j, axis=1)
if is_valid(board, this_piece2):
these_locs[counter] = this_piece2
counter += 1
locations.append(these_locs[:counter])
# resort pieces based on numbers, for lowest to highest
sort_ix = np.array([x.shape[0] for x in locations]).argsort()
locations = [locations[ix] for ix in sort_ix]
return locations
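#%% Functions - _example_find_locations
# Editor's sketch (not part of the original module): shows the shape of the
# output of find_all_valid_locations -- one 3D array of candidate placements per
# piece, sorted from the fewest placements to the most.  Building this for
# BOARD1 takes a little while because every placement is blob-checked.
def _example_find_locations():
    r"""Returns the number of valid placements for each piece on BOARD1."""
    all_pieces = make_all_permutations(make_all_pieces())
    locations = find_all_valid_locations(BOARD1, all_pieces)
    return [loc.shape[0] for loc in locations]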
#%% Functions - solve_puzzle
def solve_puzzle(board, locations, find_all=False):
r"""Solves the puzzle for the given board and all possible piece locations."""
# initialize the solutions
solutions = []
# create a working board
this_board = board.copy()
# get the number of permutations for each piece
nums = np.array([x.shape[0] for x in locations])
# start solving
last_ratio = 0
ix0 = np.arange(locations[0].shape[0])
for i0 in ix0:
np.add(this_board, locations[0][i0], this_board)
ix1 = np.flatnonzero(is_valid(this_board, locations[1]))
for i1 in ix1:
np.add(this_board, locations[1][i1], this_board)
ix2 = np.flatnonzero(is_valid(this_board, locations[2]))
for i2 in ix2:
np.add(this_board, locations[2][i2], this_board)
ix3 = np.flatnonzero(is_valid(this_board, locations[3]))
for i3 in ix3:
# display progress
last_ratio = _display_progress(np.array([i0, i1, i2, i3]), nums, last_ratio)
np.add(this_board, locations[3][i3], this_board)
ix4 = np.flatnonzero(is_valid(this_board, locations[4]))
for i4 in ix4:
np.add(this_board, locations[4][i4], this_board)
ix5 = np.flatnonzero(is_valid(this_board, locations[5]))
for i5 in ix5:
np.add(this_board, locations[5][i5], this_board)
ix6 = np.flatnonzero(is_valid(this_board, locations[6]))
for i6 in ix6:
np.add(this_board, locations[6][i6], this_board)
ix7 = np.flatnonzero(is_valid(this_board, locations[7]))
for i7 in ix7:
np.add(this_board, locations[7][i7], this_board)
ix8 = np.flatnonzero(is_valid(this_board, locations[8]))
for i8 in ix8:
np.add(this_board, locations[8][i8], this_board)
ix9 = np.flatnonzero(is_valid(this_board, locations[9]))
for i9 in ix9:
np.add(this_board, locations[9][i9], this_board)
ix10 = np.flatnonzero(is_valid(this_board, locations[10]))
for i10 in ix10:
np.add(this_board, locations[10][i10], this_board)
ix11 = np.flatnonzero(is_valid(this_board, locations[11]))
for i11 in ix11:
np.add(this_board, locations[11][i11], this_board)
# keep solution
_save_solution(solutions, this_board)
if not find_all:
return solutions
np.subtract(this_board, locations[11][i11], this_board)
np.subtract(this_board, locations[10][i10], this_board)
np.subtract(this_board, locations[9][i9], this_board)
np.subtract(this_board, locations[8][i8], this_board)
np.subtract(this_board, locations[7][i7], this_board)
np.subtract(this_board, locations[6][i6], this_board)
np.subtract(this_board, locations[5][i5], this_board)
np.subtract(this_board, locations[4][i4], this_board)
np.subtract(this_board, locations[3][i3], this_board)
np.subtract(this_board, locations[2][i2], this_board)
np.subtract(this_board, locations[1][i1], this_board)
np.subtract(this_board, locations[0][i0], this_board)
return solutions
#%% Functions - plot_board
def plot_board(board, title, opts=None):
r"""Plots the board or the individual pieces."""
# hard-coded square size
box_size = 1
# check for opts
if opts is None:
opts = Opts()
# turn interactive plotting off
plt.ioff()
# create the figure
fig = plt.figure()
# create the axis
ax = fig.add_subplot(111)
# set the title
fig.canvas.manager.set_window_title(title)
ax.set_title(title)
# draw each square
for i in range(board.shape[0]):
for j in range(board.shape[1]):
# add the rectangle patch to the existing axis
ax.add_patch(Rectangle((box_size*i,box_size*j),box_size, box_size, \
facecolor=COLORS[board[i,j]], edgecolor='k'))
# make square
ax.set_aspect('equal')
# set limits
ax.set_xlim(0, board.shape[0])
ax.set_ylim(0, board.shape[1])
# flip the vertical axis
ax.invert_yaxis()
# configure the plot
setup_plots(fig, opts)
# return the resulting figure handle
return fig
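#%% Functions - _example_plot_board
# Editor's sketch (not part of the original module): mirrors how the main script
# below renders a board; save_plot/show_plot are switched off here so the sketch
# neither writes files nor opens a window.
def _example_plot_board():
    r"""Renders the trimmed first board and closes the resulting figure."""
    opts = Opts()
    opts.save_plot = False
    opts.show_plot = False
    fig = plot_board(BOARD1[3:-3, 3:-3], 'Empty Board 1 (example)', opts=opts)
    plt.close(fig)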
#%% Functions - test_docstrings
def test_docstrings():
r"""Tests the docstrings within this file."""
unittest.main(module='dstauffman2.games.test_fiver', exit=False)
doctest.testmod(verbose=False)
#%% Main script
if __name__ == '__main__':
# flags for running code
run_tests = True
make_plots = True
make_soln = True
find_all = True
save_results = True
if run_tests:
# Run docstring test
test_docstrings()
# make all the pieces
pieces = make_all_pieces()
# create all the possible permutations of all the pieces
all_pieces = make_all_permutations(pieces)
# Create and set Opts
date = datetime.now()
opts = Opts()
opts.save_path = os.path.join(get_root_dir(), 'results', date.strftime('%Y-%m-%d') + '_fiver')
opts.save_plot = True
opts.show_plot = False
# Save plots of the possible piece orientations
if make_plots:
setup_dir(opts.save_path, rec=True)
for (ix, these_pieces) in enumerate(all_pieces):
for ix2 in range(these_pieces.shape[0]):
this_title = 'Piece {}, Permutation {}'.format(ix+1, ix2+1)
fig = plot_board(these_pieces[ix2], this_title, opts=opts)
plt.close(fig)
# print empty boards
fig = plot_board(BOARD1[3:-3,3:-3], 'Empty Board 1', opts=opts)
plt.close(fig)
fig = plot_board(BOARD2[3:-3,3:-3], 'Empty Board 2', opts=opts)
plt.close(fig)
# solve the puzzle
locations1 = find_all_valid_locations(BOARD1, all_pieces)
locations2 = find_all_valid_locations(BOARD2, all_pieces)
if make_soln:
print('Solving puzzle 1.')
solutions1 = solve_puzzle(BOARD1, locations1, find_all=find_all)
print('Solving puzzle 2.')
solutions2 = solve_puzzle(BOARD2, locations2, find_all=find_all)
# save the results
if save_results:
with open(os.path.join(opts.save_path, 'solutions1.pkl'), 'wb') as file:
pickle.dump(solutions1, file)
with open(os.path.join(opts.save_path, 'solutions2.pkl'), 'wb') as file:
pickle.dump(solutions2, file)
# plot the results
if make_soln and solutions1:
opts.show_plot = True
figs1 = []
for i in range(len(solutions1)):
this_title = 'Puzzle 1, Solution {}'.format(i+1)
figs1.append(plot_board(solutions1[i][3:-3,3:-3], this_title, opts=opts))
if np.mod(i, 10) == 0:
while figs1:
plt.close(figs1.pop())
if make_soln and solutions2:
opts.show_plot = True
figs2 = []
for i in range(len(solutions2)):
this_title = 'Puzzle 2, Solution {}'.format(i+1)
figs2.append(plot_board(solutions2[i][3:-3,3:-3], this_title, opts=opts))
if np.mod(i, 10) == 0:
while figs2:
plt.close(figs2.pop())
| lgpl-3.0 |
assisi/assisipy-lib | assisipy_utils/arena/constructors.py | 2 | 15774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author : Rob Mills, BioISI, FCUL.
: ASSISIbf project
Abstract : Constructor classes of enclosures for agents.
'''
from math import pi
from transforms import Point, Transformation
from transforms import rotate_polygon, translate_seq, apply_transform_to_group, xy_from_seq
from minimal_arenas import create_arc_with_width
import yaml
import itertools
ORIGIN = (0, 0, 0)
#{{{ Base class
class BaseArena(object):
inst_id = itertools.count().next
def __init__(self, ww=1.0, **kwargs):
self.ww = ww
self.bl_bound = (0,0)
self.tr_bound = (0,0)
self.trans = Transformation()
self.segs = []
self.color = kwargs.get('color', (0.5, 0.5, 0.5))
label_default = "arena-{}".format(BaseArena.inst_id())
self.label_stub = kwargs.get('label_stub', label_default)
self.height = kwargs.get('height', 1.0)
# not much point in getter/setter is there
def set_wall_color(self, clr):
self.color = clr
def transform(self, trans):
''' apply transform to segments, in place '''
self.trans = Transformation(dx=trans.dx, dy=trans.dy, theta=trans.theta)
self.segs = apply_transform_to_group(self.segs, trans)
def write_bounds_spec(self, fname, ):
'''
write in a consistent way the specification of an arena
to include the bounds and the transform.
'''
# construct spec dictionary
bs = {
'base_bl': self.bl_bound,
'base_tr': self.tr_bound,
'trans' : {
'dx' : self.trans.dx,
'dy' : self.trans.dy,
'theta' : self.trans.theta,
},
}
# write it to yaml file
with open(fname, 'w') as f:
yaml.safe_dump(bs, f, default_flow_style=False)
def transformed(self, trans):
'''
apply a transform to a COPY of segments, and return/
'''
return apply_transform_to_group(self.segs, trans)
def get_valid_zone(self):
return (self.bl_bound, self.tr_bound)
def get_valid_zone_rect(self):
'''
return parameters xy, xspan, yspan suitable for rendering a rectangle
by matplotlib.patches.Rectangle (or similar).
'''
dx = (self.tr_bound[0] - self.bl_bound[0])
dy = (self.tr_bound[1] - self.bl_bound[1])
return self.bl_bound, dx, dy
def _spawn_polygon(self, simctrl, poly, label,
height=1, color=(0,0,1)):
simctrl.spawn('Physical', label, ORIGIN, polygon=poly, color=color,
height=height)
def spawn(self, simctrl, verb=False, offset=0):
'''
spawn the arena in the ASSISI playground instance with handle `simctrl`.
The segments obtain names with numerical suffix. Optional int argument
`offset` increments the starting index.
'''
for i, poly in enumerate(self.segs):
label = "{}-{:03d}".format(self.label_stub, i+offset)
pts = xy_from_seq(poly)
if verb:
print "[I] attempting to spawn segment {}".format(label)
self._spawn_polygon(simctrl, pts, label, height=self.height, color=self.color)
#}}}
#{{{ StadiumArena
class StadiumArena(BaseArena):
def __init__(self, width=6.0, length=16.0, arc_steps=9, ww=1.0,
bee_len=1.5,
**kwargs):
'''
A rectangle with semi-circular ends. This corresponds to one of the
enclosures used in the Graz bee lab, and is suitable for two CASUs.
By default, this arena will be positioned horizontally, about (0, 0).
Transforms can be applied.
'''
super(StadiumArena, self).__init__(ww=ww, **kwargs)
self.width = width
self.length = length
# compute geometry
arc_rad = width / 2.0 - ww / 2.0
l_middle_seg = length - width # 2 * arc_rad
# create polygons for the two long/parallel walls
#wall_long = ( (0, 0), (l_middle_seg, 0), (l_middle_seg, ww), (0, ww), (0, 0) )
lms = l_middle_seg # shorthand
wall_long = ( (-lms/2.0, -ww/2.0), (+lms/2.0, -ww/2.0),
(+lms/2.0, +ww/2.0), (-lms/2.0, +ww/2.0),
(-lms/2.0, -ww/2.0))
poly_wl = [Point(x, y, 0) for (x, y) in wall_long]
s = [(0, -width/2.0+ww/2.0, 0), poly_wl]
n = [(0, +width/2.0-ww/2.0, 0), poly_wl]
# now we need two arcs.
# centre of the RH arc is ...
# c_r = [(ex+2)*l , 3*l / 2.0 ]
# c_l = [(0, 3*l / 2.0 ]
arc_r = create_arc_with_width(cx=+lms/2.0, cy=0,
radius=arc_rad,
theta_0=pi/2.0, theta_end=-pi/2,
steps=arc_steps, width=ww)
arc_l = create_arc_with_width(cx=-lms/2.0, cy=0,
radius=arc_rad,
theta_0=pi/2.0, theta_end=3*pi/2.0,
steps=arc_steps, width=ww)
# compile a list of segments
segs = []
for origin, poly in [s, n]:
# create relevant polygon with correct offset/position
(xo, yo, theta) = origin
ctr = Point(poly[0].x, poly[0].y) # rotate segment about its bottom left corner
seg = rotate_polygon(poly, ctr, theta) # do the rotation
seg = translate_seq(seg, dx=xo, dy=yo) # now translate the segment
segs.append(seg)
segs.extend(arc_r)
segs.extend(arc_l)
self.segs = segs
### compute the bounds within which agent bees can be spawned ###
# for simplicity, we assume that the valid zone is only between the
# parallel section, since random generation with curved bounds is likely
# going to be a pain (unless we do generate & test - then have to write
# something to do a 'hit test')
k = bee_len /2.0 # don't allow bees to be spawned in the wall
s_x, s_y, s_yaw = s[0]
n_x, n_y, n_yaw = n[0]
self.bl_bound = (s_x + poly_wl[0].x + k, s_y + poly_wl[3].y + k )
self.tr_bound = (n_x + poly_wl[2].x - k, n_y + poly_wl[1].y - k )
#}}}
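#{{{ _example_stadium_usage
# Editor's sketch (not part of the original module): the intended call sequence
# for an arena -- build it, move it with a Transformation, and query the region
# in which agents may be spawned.  The dx/dy values below are arbitrary.
def _example_stadium_usage():
    arena = StadiumArena(width=6.0, length=16.0)
    arena.transform(Transformation(dx=5.0, dy=2.5, theta=0.0))
    # the spawn bounds stay in the arena's own (untransformed) frame; the
    # transform itself is recorded separately, as write_bounds_spec() stores both
    return arena.get_valid_zone()
#}}}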
#{{{ RoundedRectArena
class RoundedRectArena(BaseArena):
def __init__(self, width=6.0, length=16.0, arc_steps=9, ww=1.0,
corner_rad=1.5, bee_len=1.5,
**kwargs):
'''
A rectangle with rounded corners.
Warning... It may be patented by apple (if you use this to design your
own tablet) D670,286S.
By default, this arena will be positioned horizontally, about (0, 0).
Transforms can be applied.
'''
super(RoundedRectArena, self).__init__(ww=ww, **kwargs)
self.width = width
self.length = length
if (corner_rad > width / 2.0 or corner_rad > length/2.0):
raise ValueError("[E] cannot construct arena with bigger corners than either dimension")
# compute geometry
l_horiz_seg = length - (2.0 * corner_rad)
l_verti_seg = width - (2.0 * corner_rad)
# shorthands
lhz = l_horiz_seg
lvt = l_verti_seg
wall_horiz = ( (-lhz/2.0, -ww/2.0), (+lhz/2.0, -ww/2.0),
(+lhz/2.0, +ww/2.0), (-lhz/2.0, +ww/2.0),
(-lhz/2.0, -ww/2.0))
wall_verti = (
(-ww/2.0, -lvt/2.0), # 1
(+ww/2.0, -lvt/2.0), # 4
(+ww/2.0, +lvt/2.0), # 3
(-ww/2.0, +lvt/2.0), # 2
(-ww/2.0, -lvt/2.0), # 1
)
poly_wh = [Point(x, y, 0) for (x, y) in wall_horiz]
poly_wv = [Point(x, y, 0) for (x, y) in wall_verti]
# create polygons for the two long/parallel walls
s = [(0, -width/2.0+ww/2.0, 0), poly_wh]
n = [(0, +width/2.0-ww/2.0, 0), poly_wh]
w = [(-length/2.0+ww/2.0, 0, 0), poly_wv]
e = [(+length/2.0-ww/2.0, 0, 0), poly_wv]
# now we need an arc for each corner
ww2 = ww/2.0
arc_tl = create_arc_with_width(cx=-lhz/2.0+ww2, cy=+lvt/2.0-ww2,
radius=corner_rad, theta_0=pi/2.0,
theta_end=pi, width=ww)
arc_tr = create_arc_with_width(cx=+lhz/2.0-ww2, cy=+lvt/2.0-ww2,
radius=corner_rad, theta_0=pi/2.0,
theta_end=0, width=ww)
arc_bl = create_arc_with_width(cx=-lhz/2.0+ww2, cy=-lvt/2.0+ww2,
radius=corner_rad, theta_0=pi,
theta_end=1.5*pi, width=ww)
arc_br = create_arc_with_width(cx=+lhz/2.0-ww2, cy=-lvt/2.0+ww2,
radius=corner_rad, theta_0=0,
theta_end=-pi/2.0, width=ww)
# compile a list of segments
segs = []
for origin, poly in [s, n, e, w]:
# create relevant polygon with correct offset/position
(xo, yo, theta) = origin
ctr = Point(poly[0].x, poly[0].y) # rotate segment about its bottom left corner
seg = rotate_polygon(poly, ctr, theta) # do the rotation
seg = translate_seq(seg, dx=xo, dy=yo) # now translate the segment
segs.append(seg)
segs += arc_tl + arc_tr + arc_bl + arc_br
self.segs = segs
### compute the bounds within which agent bees can be spawned ###
# for simplicity, we assume that the valid zone is only between the
# parallel section, since random generation with curved bounds is likely
# going to be a pain (unless we do generate & test - then have to write
# something to do a 'hit test')
k = bee_len /2.0 # don't allow bees to be spawned in the wall
s_x, s_y, s_yaw = s[0]
n_x, n_y, n_yaw = n[0]
self.bl_bound = (s_x + poly_wh[0].x + k, s_y + poly_wh[3].y + k )
self.tr_bound = (n_x + poly_wh[2].x - k, n_y + poly_wh[1].y - k )
#}}}
#{{{ RoundedRectBarrier
class RoundedRectBarrier(BaseArena):
def __init__(self, width=6.0, length=16.0, arc_steps=9, ww=1.0,
corner_rad=1.5, bee_len=1.5, edges=['n','e','s','w'],
**kwargs):
'''
A barrier that is a subset of the RoundedRect.
Specify from [n,e,s,w] edges. Appropriate corners are retained.
'''
# TODO: consolidate all shared code with roundedrectArena if possible.
super(RoundedRectBarrier, self).__init__(ww=ww, **kwargs)
self.width = width
self.length = length
if (corner_rad > width / 2.0 or corner_rad > length/2.0):
raise ValueError("[E] cannot construct arena with bigger corners than either dimension")
# compute geometry
l_horiz_seg = length - (2.0 * corner_rad)
l_verti_seg = width - (2.0 * corner_rad)
# shorthands
lhz = l_horiz_seg
lvt = l_verti_seg
wall_horiz = ( (-lhz/2.0, -ww/2.0), (+lhz/2.0, -ww/2.0),
(+lhz/2.0, +ww/2.0), (-lhz/2.0, +ww/2.0),
(-lhz/2.0, -ww/2.0))
wall_verti = (
(-ww/2.0, -lvt/2.0), # 1
(+ww/2.0, -lvt/2.0), # 4
(+ww/2.0, +lvt/2.0), # 3
(-ww/2.0, +lvt/2.0), # 2
(-ww/2.0, -lvt/2.0), # 1
)
poly_wh = [Point(x, y, 0) for (x, y) in wall_horiz]
poly_wv = [Point(x, y, 0) for (x, y) in wall_verti]
# create polygons for the two long/parallel walls
s = [(0, -width/2.0+ww/2.0, 0), poly_wh]
n = [(0, +width/2.0-ww/2.0, 0), poly_wh]
w = [(-length/2.0+ww/2.0, 0, 0), poly_wv]
e = [(+length/2.0-ww/2.0, 0, 0), poly_wv]
polys = []
if 'e' in edges: polys.append(e)
if 'n' in edges: polys.append(n)
if 'w' in edges: polys.append(w)
if 's' in edges: polys.append(s)
# now we need an arc for each corner
ww2 = ww/2.0
arc_tl = create_arc_with_width(cx=-lhz/2.0+ww2, cy=+lvt/2.0-ww2,
radius=corner_rad, theta_0=pi/2.0,
theta_end=pi, width=ww)
arc_tr = create_arc_with_width(cx=+lhz/2.0-ww2, cy=+lvt/2.0-ww2,
radius=corner_rad, theta_0=pi/2.0,
theta_end=0, width=ww)
arc_bl = create_arc_with_width(cx=-lhz/2.0+ww2, cy=-lvt/2.0+ww2,
radius=corner_rad, theta_0=pi,
theta_end=1.5*pi, width=ww)
arc_br = create_arc_with_width(cx=+lhz/2.0-ww2, cy=-lvt/2.0+ww2,
radius=corner_rad, theta_0=0,
theta_end=-pi/2.0, width=ww)
# compile a list of segments
segs = []
for origin, poly in polys:
# create relevant polygon with correct offset/position
(xo, yo, theta) = origin
ctr = Point(poly[0].x, poly[0].y) # rotate segment about its bottom left corner
seg = rotate_polygon(poly, ctr, theta) # do the rotation
seg = translate_seq(seg, dx=xo, dy=yo) # now translate the segment
segs.append(seg)
if 'e' in edges and 'n' in edges: segs += arc_tr
if 'e' in edges and 's' in edges: segs += arc_br
if 'w' in edges and 'n' in edges: segs += arc_tl
if 'w' in edges and 's' in edges: segs += arc_bl
self.segs = segs
### compute the bounds within which agent bees can be spawned ###
# for simplicity, we assume that the valid zone is only between the
# parallel section, since random generation with curved bounds is likely
# going to be a pain (unless we do generate & test - then have to write
# something to do a 'hit test')
k = bee_len /2.0 # don't allow bees to be spawned in the wall
s_x, s_y, s_yaw = s[0]
n_x, n_y, n_yaw = n[0]
self.bl_bound = (s_x + poly_wh[0].x + k, s_y + poly_wh[3].y + k )
self.tr_bound = (n_x + poly_wh[2].x - k, n_y + poly_wh[1].y - k )
#}}}
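#{{{ _example_barrier_usage
# Editor's sketch (not part of the original module): a RoundedRectBarrier with
# edges=['n', 'e'] builds only the north and east walls plus the arc of the one
# corner they share, which is handy for closing off part of a larger layout.
def _example_barrier_usage():
    barrier = RoundedRectBarrier(width=6.0, length=16.0, edges=['n', 'e'])
    # two straight wall polygons plus the arc segments of the top-right corner
    return len(barrier.segs)
#}}}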
#{{{ CircleArena
class CircleArena(BaseArena):
def __init__(self, radius=15.5, arc_steps=36, ww=1.0, bee_len=1.5,
**kwargs):
'''
A circular arena, corresponding to an enclosure used in the Graz bee
lab, suitable for four CASUs.
By default, this arena will be positioned horizontally, about (0, 0).
Transforms can be applied.
'''
super(CircleArena, self).__init__(ww=ww, **kwargs)
self.radius = radius
# compute geometry
arc_rad = self.radius - ww
# can we do it with a single arc?
arc_t = create_arc_with_width(cx=0, cy=0, radius=arc_rad, theta_0=0,
theta_end=2*pi,
steps=arc_steps, width=ww)
self.segs = arc_t
### compute the bounds within which agent bees can be spawned ###
# for simplicity, we define a square inside the circle that is valid.
k = bee_len /2.0 # don't allow bees to be spawned in the wall
        # the side length of the square inside the circle is sqrt(2)*r
_dim = (2.0**0.5 * radius * 0.5 ) - k - ww
self.bl_bound = (-_dim, -_dim)
self.tr_bound = (+_dim, +_dim)
#}}}
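#{{{ _example_circle_bounds
# Editor's sketch (not part of the original module): the spawnable zone of a
# CircleArena is the axis-aligned square inscribed in the circle (half-side of
# roughly sqrt(2)/2 * radius), shrunk by the wall width and half a bee length.
def _example_circle_bounds():
    arena = CircleArena(radius=15.5)
    return arena.get_valid_zone()
#}}}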
| lgpl-3.0 |
vinodkc/spark | python/pyspark/sql/dataframe.py | 4 | 100392 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import warnings
from functools import reduce
from html import escape as html_escape
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3.0
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Examples
--------
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Examples
--------
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.",
FutureWarning
)
self._jdf.createOrReplaceTempView(name)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.1.0
Examples
--------
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
.. versionadded:: 2.2.0
Examples
--------
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except Exception as e:
raise ValueError(
"Unable to parse datatype from schema. %s" % e) from e
return self._schema
def printSchema(self):
"""Prints out the schema in the tree format.
.. versionadded:: 1.3.0
Examples
--------
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
.. versionadded:: 1.3.0
        Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
            When this is a string and ``mode`` is not specified, the string is
            treated as the mode.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
"""
if extended is not None and mode is not None:
raise ValueError("extended and mode should not be set together.")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
argtypes = [
str(type(arg)) for arg in [extended, mode] if arg is not None]
raise TypeError(
"extended (optional) and mode (optional) should be a string "
"and bool; however, got [%s]." % ", ".join(argtypes))
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
elif is_extended_as_mode:
explain_mode = extended
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
return self._jdf.isStreaming()
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool or int, optional
If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and align cells right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if not isinstance(n, int) or isinstance(n, bool):
raise TypeError("Parameter 'n' (number of rows) must be an int")
if not isinstance(vertical, bool):
raise TypeError("Parameter 'vertical' must be a bool")
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
try:
int_truncate = int(truncate)
except ValueError:
raise TypeError(
"Parameter 'truncate={}' should be either bool or int.".format(truncate))
print(self._jdf.showString(n, int_truncate, vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
.. versionadded:: 2.1.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
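    # Editor's note (illustrative sketch, not part of the upstream docs): typical
    # usage, assuming a writable checkpoint directory has been configured first:
    #
    #     spark.sparkContext.setCheckpointDir("/tmp/spark-checkpoints")  # example path
    #     df2 = df.checkpoint()  # materializes df and truncates its lineage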
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
Parameters
----------
eventTime : str
the name of the column that contains the event time of the row.
delayThreshold : str
the minimum delay to wait to data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
Notes
-----
This API is evolving.
>>> from pyspark.sql.functions import timestamp_seconds
>>> sdf.select(
... 'name',
... timestamp_seconds(sdf.time).alias('time')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.count()
2
"""
return int(self._jdf.count())
def collect(self):
"""Returns all the records as a list of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
Examples
--------
>>> list(df.toLocalIterator())
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
def limit(self, num):
"""Limits the result count to the number specified.
.. versionadded:: 1.3.0
Examples
--------
>>> df.limit(1).collect()
[Row(age=2, name='Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.take(2)
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
return self.limit(num).collect()
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
Examples
--------
>>> df.tail(1)
[Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
        This is a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK_DESER):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_AND_DISK_DESER`)
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
.. versionadded:: 2.1.0
Examples
--------
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
.. versionadded:: 1.4.0
Parameters
----------
numPartitions : int
specify the target number of partitions
Examples
--------
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Examples
--------
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
.. versionadded:: 2.4.0
Parameters
----------
numPartitions : int or :class:`Column`
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Notes
-----
Due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
`fraction` is required; `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
:class:`DataFrame`
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise TypeError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise TypeError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise TypeError("key must be float, int, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Examples
--------
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), int(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
def dtypes(self):
"""Returns all column names and their data types as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
def columns(self):
"""Returns all column names as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
.. versionadded:: 2.3.0
Parameters
----------
colName : str
string, column name specified as a regex.
Examples
--------
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, str):
raise TypeError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
.. versionadded:: 1.3.0
Parameters
----------
alias : str
an alias name to be set for the :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name='Bob', name='Bob', age=5), Row(name='Alice', name='Alice', age=2)]
"""
assert isinstance(alias, str), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
.. versionadded:: 2.1.0
Parameters
----------
other : :class:`DataFrame`
Right side of the cartesian product.
Examples
--------
>>> df.select("age", "name").collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df2.select("name", "height").collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name='Alice', height=80), Row(age=2, name='Alice', height=85),
Row(age=5, name='Bob', height=80), Row(age=5, name='Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Examples
--------
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name='Bob', height=85), Row(name='Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85), Row(name='Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name='Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name='Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
.. versionadded:: 1.3.1
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Use summary for expanded statistics and control over which statistics to compute.
Examples
--------
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
See Also
--------
DataFrame.summary
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See Also
--------
DataFrame.describe
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. versionadded:: 1.3.0
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
If n is None, return a single :class:`Row`.
Otherwise, return a list of :class:`Row` (a list of length 1 when ``n`` is 1).
Examples
--------
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
def first(self):
"""Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.first()
Row(age=2, name='Alice')
"""
return self.head()
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name='Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name='Bob')]
"""
if isinstance(item, str):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Examples
--------
>>> df.select('*').collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.select('name', 'age').collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name='Alice', age=12), Row(name='Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
Examples
--------
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
Examples
--------
>>> df.filter(df.age > 3).collect()
[Row(age=5, name='Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name='Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name='Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name='Alice')]
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
Examples
--------
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name='Alice', age=2, count=1), Row(name='Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy().agg()``).
.. versionadded:: 1.3.0
Examples
--------
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
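Examples
--------
A minimal illustrative sketch (marked ``# doctest: +SKIP``), reusing the
two-row ``df`` defined in this module's test globals; it contrasts the
`UNION ALL` behavior with a follow-up :func:`distinct`:
>>> df.union(df).count()  # doctest: +SKIP
4
>>> df.union(df).distinct().count()  # doctest: +SKIP
2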
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
def unionByName(self, other, allowMissingColumns=False):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
.. versionadded:: 2.3.0
Examples
--------
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
When the parameter `allowMissingColumns` is ``True``, the set of column names
in this and other :class:`DataFrame` can differ; missing columns will be filled with null.
Further, the missing columns of this :class:`DataFrame` will be added at the end
in the schema of the union result:
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|null|
|null| 4| 5| 6|
+----+----+----+----+
.. versionchanged:: 3.1.0
Added optional argument `allowMissingColumns` to specify whether to allow
missing columns.
"""
return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
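Examples
--------
A minimal illustrative sketch (marked ``# doctest: +SKIP``); ``df1`` and
``df2`` are small DataFrames created here only for illustration:
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])  # doctest: +SKIP
>>> df2 = spark.createDataFrame([("a", 1), ("b", 4)], ["C1", "C2"])  # doctest: +SKIP
>>> df1.intersect(df2).collect()  # doctest: +SKIP
[Row(C1='a', C2=1)]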
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
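Examples
--------
A minimal illustrative sketch (marked ``# doctest: +SKIP``), reusing the
test-global ``df`` (Alice is 2, Bob is 5):
>>> df.subtract(df.filter(df.age > 3)).collect()  # doctest: +SKIP
[Row(age=2, name='Alice')]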
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be, and the system will accordingly limit the state. In addition, data older than the
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
thresh : int, optional
default None
If specified, drop rows that have fewer than `thresh` non-null values.
This overrides the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Examples
--------
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise TypeError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise TypeError("value should be a float, int, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise TypeError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
or strings. Value can be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements, all values to be replaced should have unique
floating point representations. In case of conflicts (for example with `{42: -1, 42.0: 1}`),
an arbitrary replacement will be used.
.. versionadded:: 1.4.0
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(str)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(str)
all_of_numeric = all_of((float, int))
# Validate input types
valid_types = (bool, float, int, str, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise TypeError(
"to_replace should be a bool, float, int, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise TypeError("If to_replace is not a dict, value should be "
"a bool, float, int, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, str))):
raise TypeError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, str)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, str)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, str):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
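For example, with N = 1000 rows, p = 0.5 and err = 0.01, the exact rank of the
returned value is guaranteed to lie between floor(0.49 * 1000) = 490 and
ceil(0.51 * 1000) = 510.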
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
.. versionadded:: 2.0.0
Parameters
----------
col: str, tuple or list
Can be a single column name, or a list of names for multiple columns.
.. versionchanged:: 2.2
Added support for multiple columns.
probabilities : list or tuple
a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
relativeError : float
The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
Returns
-------
list
the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
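Examples
--------
A minimal illustrative sketch (marked ``# doctest: +SKIP``); the input
DataFrame is created here only for illustration, and a relative error of
0.0 requests exact quantiles:
>>> spark.range(0, 100).approxQuantile("id", [0.0, 1.0], 0.0)  # doctest: +SKIP
[0.0, 99.0]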
"""
if not isinstance(col, (str, list, tuple)):
raise TypeError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, str):
raise TypeError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise TypeError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int)):
raise TypeError("relativeError should be numerical (float, int)")
if relativeError < 0:
raise ValueError("relativeError should be >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
method : str, optional
The correlation method. Currently only supports "pearson"
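Examples
--------
A minimal illustrative sketch (marked ``# doctest: +SKIP``); ``df_xy`` is a
hypothetical DataFrame created here only for illustration, with perfectly
linearly related columns, so the Pearson correlation is 1.0:
>>> df_xy = spark.createDataFrame([(1.0, 2.0), (2.0, 4.0), (3.0, 6.0)], ["x", "y"])  # doctest: +SKIP
>>> df_xy.corr("x", "y")  # doctest: +SKIP
1.0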
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
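Examples
--------
A minimal illustrative sketch (marked ``# doctest: +SKIP``); ``df_xy`` is a
hypothetical DataFrame created here only for illustration. For x = [1, 2, 3]
and y = [2, 4, 6] the sample covariance is 4 / 2 = 2.0:
>>> df_xy = spark.createDataFrame([(1.0, 2.0), (2.0, 4.0), (3.0, 6.0)], ["x", "y"])  # doctest: +SKIP
>>> df_xy.cov("x", "y")  # doctest: +SKIP
2.0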
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column. Distinct items will make the first item of
each row.
col2 : str
The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
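Examples
--------
A minimal illustrative sketch (marked ``# doctest: +SKIP``), reusing the
test-global ``df``; the first output column is named ``age_name`` and the
remaining columns are the distinct names:
>>> ct = df.crosstab("age", "name")  # doctest: +SKIP
>>> sorted(ct.columns)  # doctest: +SKIP
['Alice', 'Bob', 'age_name']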
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
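Examples
--------
A minimal illustrative sketch (marked ``# doctest: +SKIP``), reusing the
test-global ``df``; the result has one ``<col>_freqItems`` column per input
column:
>>> freq = df.freqItems(["age"], support=0.5)  # doctest: +SKIP
>>> freq.columns  # doctest: +SKIP
['age_freqItems']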
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise TypeError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
Examples
--------
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name='Alice', age2=4), Row(age=5, name='Bob', age2=7)]
"""
if not isinstance(col, Column):
raise TypeError("col should be Column")
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
.. versionadded:: 1.3.0
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Examples
--------
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name='Alice'), Row(age2=5, name='Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Examples
--------
>>> df.drop('age').collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.drop(df.age).collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name='Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name='Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, str):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, str):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def toDF(self, *cols):
Returns a new :class:`DataFrame` with new specified column names.
Parameters
----------
cols : str
new column names
Examples
--------
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2='Alice'), Row(f1=5, f2='Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def transform(self, func):
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
therefore return the same results.
.. versionadded:: 3.1.0
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
This API can compare both :class:`DataFrame`\\s very quickly but can still return
`False` for :class:`DataFrame`\\s that return the same results, for instance, from
different plans. Such false negatives are acceptable for use cases such as caching.
This API is a developer API.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise TypeError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
def inputFiles(self):
"""
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.read.load("examples/src/main/resources/people.json", format="json")
>>> len(df.inputFiles())
1
"""
return list(self._jdf.inputFiles())
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def writeTo(self, table):
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(height=80, name='Tom'), Row(height=85, name='Bob')]).toDF()
globs['df3'] = sc.parallelize([Row(age=2, name='Alice'),
Row(age=5, name='Bob')]).toDF()
globs['df4'] = sc.parallelize([Row(age=10, height=80, name='Alice'),
Row(age=5, height=None, name='Bob'),
Row(age=None, height=None, name='Tom'),
Row(age=None, height=None, name=None)]).toDF()
globs['df5'] = sc.parallelize([Row(age=10, name='Alice', spy=False),
Row(age=5, name='Bob', spy=None),
Row(age=None, name='Mallory', spy=True)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
Jimmy-Morzaria/scikit-learn | sklearn/metrics/tests/test_regression.py | 31 | 3010 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred)
assert_almost_equal(error, 1 - 5. / 2)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2 = _check_reg_targets(y1, y2)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2)
| bsd-3-clause |
necozay/tulip-control | examples/developer/fuel_tank/continuous_switched_test.py | 1 | 2638 | """test hybrid construction"""
import logging
logging.basicConfig(level=logging.INFO)
import time
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from tulip import abstract, hybrid
from polytope import box2poly
input_bound = 0.4
uncertainty = 0.05
cont_state_space = box2poly([[0., 3.], [0., 2.]])
cont_props = {}
cont_props['home'] = box2poly([[0., 1.], [0., 1.]])
cont_props['lot'] = box2poly([[2., 3.], [1., 2.]])
sys_dyn = dict()
allh = [0.5, 1.1, 1.5]
modes = []
modes.append(('normal', 'fly'))
modes.append(('refuel', 'fly'))
modes.append(('emergency', 'fly'))
"""First PWA mode"""
def subsys0(h):
A = np.array([[1.1052, 0.], [ 0., 1.1052]])
B = np.array([[1.1052, 0.], [ 0., 1.1052]])
E = np.array([[1,0], [0,1]])
U = box2poly([[-1., 1.], [-1., 1.]])
U.scale(input_bound)
W = box2poly([[-1., 1.], [-1., 1.]])
W.scale(uncertainty)
dom = box2poly([[0., 3.], [h, 2.]])
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, dom)
return sys_dyn
def subsys1(h):
A = np.array([[0.9948, 0.], [0., 1.1052]])
B = np.array([[-1.1052, 0.], [0., 1.1052]])
E = np.array([[1, 0], [0, 1]])
U = box2poly([[-1., 1.], [-1., 1.]])
U.scale(input_bound)
W = box2poly([[-1., 1.], [-1., 1.]])
W.scale(uncertainty)
dom = box2poly([[0., 3.], [0., h]])
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, dom)
return sys_dyn
for mode, h in zip(modes, allh):
subsystems = [subsys0(h), subsys1(h)]
sys_dyn[mode] = hybrid.PwaSysDyn(subsystems, cont_state_space)
"""Switched Dynamics"""
# collect env, sys_modes
env_modes, sys_modes = zip(*modes)
msg = 'Found:\n'
msg += '\t Environment modes: ' + str(env_modes)
msg += '\t System modes: ' + str(sys_modes)
switched_dynamics = hybrid.SwitchedSysDyn(
disc_domain_size=(len(env_modes), len(sys_modes)),
dynamics=sys_dyn,
env_labels=env_modes,
disc_sys_labels=sys_modes,
cts_ss=cont_state_space
)
print(switched_dynamics)
ppp = abstract.prop2part(cont_state_space, cont_props)
ppp, new2old = abstract.part2convex(ppp)
"""Discretize to establish transitions"""
start = time.time()
N = 8
trans_len=1
disc_params = {}
for mode in modes:
disc_params[mode] = {'N':N, 'trans_length':trans_len}
swab = abstract.multiproc_discretize_switched(
ppp, switched_dynamics, disc_params,
plot=True, show_ts=True
)
print(swab)
axs = swab.plot(show_ts=True)
for i, ax in enumerate(axs):
ax.figure.savefig('swab_' + str(i) + '.pdf')
#ax = sys_ts.ts.plot()
elapsed = (time.time() - start)
print('Discretization lasted: ' + str(elapsed))
| bsd-3-clause |
khkaminska/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to trigger a ``LinAlgError`` exception
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
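    # Illustrative numeric sketch of the cancellation described in the docstring above
    # (not part of the original test, hypothetical values): with w = [0.5, 0.5] and
    # x = [1e8, 1e8 + 1], E[x^2] is about 1e16 + 1e8 + 0.5 and mu^2 is about
    # 1e16 + 1e8 + 0.25, so the uncentered form subtracts two nearly equal ~1e16
    # numbers and loses the true variance of 0.25 to round-off, whereas the centered
    # form sum_i w_i (x_i - mu)^2 recovers 0.25 without cancellation.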
    # we build a dataset with 2 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
TakakiNishio/grasp_planning | depth/random_planning_for_motoman/planning_average.py | 2 | 4288 | # -*- coding: utf-8 -*-
#python library
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import *
import sys
from scipy import misc
import shutil
import os
import random
import time
# OpenCV
import cv2
#chainer library
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer import serializers
#python script
import network_structure as nn
import pickup_object as po
import random_rectangle as rr
import path as p
# label preparation
def label_handling(data_label_1,data_label_2):
data_label = []
if data_label_1 < 10 :
data_label.append(str(0)+str(data_label_1))
else:
data_label.append(str(data_label_1))
if data_label_2 < 10 :
data_label.append(str(0)+str(data_label_2))
else:
data_label.append(str(data_label_2))
return data_label
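# Illustrative usage note (not part of the original script): the zero padding above
# means e.g. label_handling(3, 12) returns ['03', '12'], matching the two-digit
# dp<dir><pic>r.png file names assembled in the main block below.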
# load the depth image data
def load_depth_image(path,scale):
img = cv2.imread(path)
grayed = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
resized_img = cv2.resize(grayed,(img.shape[0]/scale,img.shape[1]/scale))
img_array = np.asanyarray(resized_img,dtype=np.float32)
img_shape = img_array.shape
img_array = np.reshape(img_array,(resized_img.size,1))
img_list = []
for i in range(len(img_array)):
img_list.append(img_array[i][0]/255.0)
return img,img_list
# generate input data for CNN
def generate_input_data(path,rec_list,scale):
img,img_list = load_depth_image(path,scale)
x = rec_list + img_list
x = np.array(x,dtype=np.float32).reshape((1,33008))
return x,img
# calculate z (gripper height)
def calculate_z(path,rec,scale):
img = cv2.imread(path)
grayed = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_array = np.asarray(grayed)
x1 = int(rec[1]*scale)
y1 = int(rec[0]*scale)
x2 = int(rec[5]*scale)
y2 = int(rec[4]*scale)
ml = np.max(img_array[np.min([x1,x2]):np.max([x1,x2]),np.min([y1,y2]):np.max([y1,y2])])
    z = round(ml * (150 / 255.0), 2)
return z
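# Illustrative note (assuming the 150 above is a 150 mm gripper height range): a
# maximum grayscale value of 255 inside the rectangle gives z = 150.0, and a value
# of 128 gives z = round(128 * 150 / 255.0, 2) = 75.29.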
# draw the object area
def draw_object_area(img,object_area,rec_area):
for i in range(len(rec_area)):
for j in range(len(object_area[i])):
for k in range(2):
object_area[i][j][0][k] = object_area[i][j][0][k] - 100
cv2.rectangle(img, (rec_area[i][0]-100, rec_area[i][1]-100), (rec_area[i][0]+rec_area[i][2]-100, rec_area[i][1]+rec_area[i][3]-100), (255,0,0), 1)
cv2.drawContours(img, object_area, -1, (255,0,255),1)
# draw the grasp rectangle
def draw_grasp_rectangle(img,rec,scale):
color = [(0,255,255), (0,255,0)]
rec = (np.array(rec)*scale).astype(np.int32)
cv2.line(img,(rec[0],rec[1]),(rec[2],rec[3]), color[0], 2)
cv2.line(img,(rec[2],rec[3]),(rec[4],rec[5]), color[1], 2)
cv2.line(img,(rec[4],rec[5]),(rec[6],rec[7]), color[0], 2)
cv2.line(img,(rec[6],rec[7]),(rec[0],rec[1]), color[1], 2)
plt.figure(1)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.axis('off')
cv2.imwrite('pictures/depth.png',img)
#main
if __name__ == '__main__':
#directory_n = input('Directory No > ')
#picture_n = input('Image No > ')
# random checking
directory_n = randint(9)+1
picture_n = randint(40)+1
scale = 2
print 'directory:'+str(directory_n)+' picture:'+str(picture_n)
data_label = label_handling(directory_n,picture_n)
path = p.data_path()+data_label[0]+'/dp'+data_label[0]+data_label[1]+'r.png'
model = nn.CNN_classification()
serializers.load_npz('cnn.model', model)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
search_area,rec_area = po.find_object_from_RGB(data_label)
while(1):
rec,center,angle,w,p1,p2,p3,p4 = rr.random_rec(search_area,rec_area,scale)
x,img = generate_input_data(path,rec,scale)
test_output = model.forward(chainer.Variable(x))
test_label = np.argmax(test_output.data[0])
if test_label == 1:
break
angle = round(angle*180/np.pi,2)
z = calculate_z(path,rec,scale)
print '\n'+'(xc,yc): '+str(center)+', zc[mm]: '+str(z)
print 'theta[deg]: '+str(angle)+', gripper_width: '+str(w)+'\n'
draw_object_area(img,search_area,rec_area)
draw_grasp_rectangle(img,rec,scale)
plt.show()
| gpl-3.0 |
python27/NetworkControllability | NetworkControllability/test.py | 1 | 2990 | import numpy as np
import networkx as nx
import exact_controllability as ECT
import strutral_controllability as SCT
import matplotlib.pyplot as plt
import matplotlib.pylab as plb
import csv
import operator
import random
import os
import subprocess
import threading
import time
import math
import UtilityFunctions as UF
#G = nx.Graph()
#G.add_nodes_from([0, 1, 2, 3])
#G.add_edge(0, 1)
#G.add_edge(0, 2)
#G.add_edge(0, 3)
#G.add_edge(1, 2)
#print nx.transitivity(G)
#DG = G.to_directed()
#print DG.edges()
def ReadPajek(filename):
'''Read pajek file to construct Graph'''
G = nx.Graph()
fp = open(filename, 'r')
line = fp.readline()
while line:
if line[0] == '*':
line = line.strip().split()
#print "line = ", line
label = line[0]
number = int(line[1])
if label == '*Vertices' or label == '*vertices':
NodeNum = number
for i in range(NodeNum):
NodeLine = fp.readline()
NodeLine = NodeLine.strip().split()
NodeID = int(NodeLine[0])
NodeLabel = NodeLine[1]
G.add_node(NodeID)
elif label == '*Edges' or label == '*edges':
EdgeNum = number
for i in range(EdgeNum):
EdgeLine = fp.readline()
EdgeLine = EdgeLine.strip().split()
u = int(EdgeLine[0])
v = int(EdgeLine[1])
#w = float(EdgeLine[2])
G.add_edge(u, v)
else:
pass
line = fp.readline()
fp.close()
return G
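# Sketch of the minimal Pajek layout ReadPajek expects (hypothetical file contents,
# shown only for illustration):
#
#   *Vertices 3
#   1 "a"
#   2 "b"
#   3 "c"
#   *Edges 2
#   1 2
#   2 3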
if __name__ == "__main__":
"""
    NOTE: The core effects on controllability of USAir97 are calculated
    with the PAJEK software, not with NetworkX
"""
#G = ReadPajek("dataset/TEST_USAir97.net")
M = ReadPajek("dataset/Erdos971_revised.net")
G = max(nx.connected_component_subgraphs(M),key=len)
N = G.number_of_nodes()
L = G.number_of_edges()
remove_fraction = 0.20
N_rm = int(N * remove_fraction)
nodes = nx.nodes(G)
print "Before Removing:\n"
print "N = ", N
print "L = ", L
random.shuffle(nodes)
rm_nodes = nodes[0:N_rm]
G.remove_nodes_from(rm_nodes)
print "\n after removing:"
print "N = ", G.number_of_nodes()
print "L = ", G.number_of_edges()
print "<k> = ", float(2 * G.number_of_edges()) / G.number_of_nodes()
    avg_deg = UF.average_degree(G)
    print "<k>: ", avg_deg
    avg_bet = UF.average_betweenness_centrality(G)
    print "<B>: ", avg_bet
#APL = nx.average_shortest_path_length(G)
#print "APL: ", APL
N_core = nx.core_number(G)
core_values = N_core.values()
leaves = [x for x in core_values if x <= 1]
print "#leaves:", len(leaves)
print "#core:", G.number_of_nodes() - len(leaves)
print "#core/#N:", (G.number_of_nodes() - len(leaves))/float(G.number_of_nodes())
| bsd-2-clause |
krischer/wfdiff | doc/convert.py | 3 | 1940 | #! /usr/bin/env python
"""
Convert empty IPython notebook to a sphinx doc page.
"""
import io
import os
import sys
from IPython.nbformat import current
def clean_for_doc(nb):
"""
Cleans the notebook to be suitable for inclusion in the docs.
"""
new_cells = []
for cell in nb.worksheets[0].cells:
# Remove the pylab inline line.
if "input" in cell and cell["input"].strip() == "%pylab inline":
continue
# Remove output resulting from the stream/trace method chaining.
if "outputs" in cell:
outputs = [_i for _i in cell["outputs"] if "text" not in _i or
not _i["text"].startswith("<obspy.core")]
cell["outputs"] = outputs
new_cells.append(cell)
nb.worksheets[0].cells = new_cells
return nb
def strip_output(nb):
"""
strip the outputs from a notebook object
"""
for cell in nb.worksheets[0].cells:
if 'outputs' in cell:
cell['outputs'] = []
if 'prompt_number' in cell:
cell['prompt_number'] = None
return nb
def convert_nb(nbname):
os.system("runipy --o %s.ipynb --matplotlib --quiet" % nbname)
os.system("rm -rf ./index_files")
filename = "%s.ipynb" % nbname
with io.open(filename, 'r', encoding='utf8') as f:
nb = current.read(f, 'json')
nb = clean_for_doc(nb)
print("Writing to", filename)
with io.open(filename, 'w', encoding='utf8') as f:
current.write(nb, f, 'json')
os.system("ipython nbconvert --to rst %s.ipynb" % nbname)
filename = "%s.ipynb" % nbname
with io.open(filename, 'r', encoding='utf8') as f:
nb = current.read(f, 'json')
nb = strip_output(nb)
print("Writing to", filename)
with io.open(filename, 'w', encoding='utf8') as f:
current.write(nb, f, 'json')
if __name__ == "__main__":
for nbname in sys.argv[1:]:
convert_nb(nbname)
| gpl-3.0 |
HeraclesHX/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
dingocuster/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.3/tutorials/solver_times.py | 2 | 9274 | #!/usr/bin/env python
# coding: utf-8
# # Advanced: solver_times
# ## Setup
#
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# Let's get started with some basic imports
# In[2]:
import phoebe
import numpy as np
import matplotlib.pyplot as plt
# And then we'll build a synthetic "dataset" and initialize a new bundle with those data
# In[3]:
b = phoebe.default_binary()
b.add_dataset('lc', times=phoebe.linspace(0,5,1001))
b.add_compute('ellc', compute='ellc01')
b.set_value_all('ld_mode', 'lookup')
b.run_compute('ellc01')
times = b.get_value('times@model')
fluxes = b.get_value('fluxes@model')
sigmas = np.ones_like(times) * 0.01
b = phoebe.default_binary()
b.add_dataset('lc', compute_phases=phoebe.linspace(0,1,101),
times=times, fluxes=fluxes, sigmas=sigmas)
b.add_compute('ellc', compute='ellc01')
b.set_value_all('ld_mode', 'lookup')
b.add_solver('optimizer.nelder_mead', compute='ellc01', fit_parameters=['teff'], solver='nm_solver')
# ## solver_times parameter and options
# In[4]:
print(b.filter(qualifier='solver_times'))
# In[5]:
print(b.get_parameter(qualifier='solver_times', dataset='lc01').choices)
# The logic for solver times is generally only used internally within [b.run_solver](../api/phoebe.frontend.bundle.Bundle.run_solver.md) (for optimizers and samplers which require a forward-model to be computed). However, it is useful (in order to diagnose any issues, for example) to be able to see how the combination of `solver_times`, `times`, `compute_times`/`compute_phases`, `mask_enabled`, and `mask_phases` will be interpreted within PHOEBE during [b.run_solver](../api/phoebe.frontend.bundle.Bundle.run_solver.md).
#
# See also:
# * [Advanced: mask_phases](./mask_phases.ipynb)
# * [Advanced: Compute Times & Phases](./compute_times_phases.ipynb)
#
# To access the underlying times that would be used, we can call [b.parse_solver_times](../api/phoebe.frontend.bundle.Bundle.parse_solver_times.md). Let's first look at the docstring (also available from the link above):
# In[6]:
help(b.parse_solver_times)
# Additionally, we can pass `solver` to [b.run_compute](../api/phoebe.frontend.bundle.Bundle.run_compute.md) to have the forward-model computed as it would be within the solver itself (this just calls `run_compute` with the compute option referenced by the solver and with the parsed `solver_times`).
#
# Below we'll go through each of the scenarios listed above and demonstrate how that logic changes the times at which the forward model will be computed within [b.run_solver](../api/phoebe.frontend.bundle.Bundle.run_solver.md) (with the cost-function interpolating between the resulting forward-model and the observations as necessary).
#
# The messages regarding the internal choice of logic for `solver_times` will be exposed at the 'info' level of the logger. We'll leave that off here to avoid the noise of the logger messages from `run_compute` calls, but you can uncomment the following line to see those messages.
# In[7]:
#logger = phoebe.logger('info')
# ## solver_times = 'times'
# ### without phase_mask enabled
# In[8]:
b.set_value('solver_times', 'times')
b.set_value('compute_phases', phoebe.linspace(0,1,101))
b.set_value('mask_enabled', False)
b.set_value('dperdt', 0.0)
# In[9]:
dataset_times = b.get_value('times', context='dataset')
_ = plt.plot(times, np.ones_like(times)*1, 'k.')
compute_times = b.get_value('compute_times', context='dataset')
_ = plt.plot(compute_times, np.ones_like(compute_times)*2, 'b.')
solver_times = b.parse_solver_times()
print(solver_times)
_ = plt.plot(solver_times['lc01'], np.ones_like(solver_times['lc01'])*3, 'g.')
# In[10]:
b.run_compute(solver='nm_solver')
_ = b.plot(show=True)
# ### with phase_mask enabled
# In[11]:
b.set_value('solver_times', 'times')
b.set_value('compute_phases', phoebe.linspace(0,1,101))
b.set_value('mask_enabled', True)
b.set_value('mask_phases', [(-0.1, 0.1), (0.45,0.55)])
b.set_value('dperdt', 0.0)
# In[12]:
dataset_times = b.get_value('times', context='dataset')
_ = plt.plot(times, np.ones_like(times)*1, 'k.')
compute_times = b.get_value('compute_times', context='dataset')
_ = plt.plot(compute_times, np.ones_like(compute_times)*2, 'b.')
solver_times = b.parse_solver_times()
_ = plt.plot(solver_times['lc01'], np.ones_like(solver_times['lc01'])*3, 'g.')
# In[13]:
b.run_compute(solver='nm_solver')
_ = b.plot(show=True)
# ## solver_times = 'compute_times'
# ### without phase_mask enabled
# In[14]:
b.set_value('solver_times', 'compute_times')
b.set_value('compute_phases', phoebe.linspace(0,1,101))
b.set_value('mask_enabled', False)
b.set_value('dperdt', 0.0)
# In[15]:
dataset_times = b.get_value('times', context='dataset')
_ = plt.plot(times, np.ones_like(times)*1, 'k.')
compute_times = b.get_value('compute_times', context='dataset')
_ = plt.plot(compute_times, np.ones_like(compute_times)*2, 'b.')
solver_times = b.parse_solver_times()
_ = plt.plot(solver_times['lc01'], np.ones_like(solver_times['lc01'])*3, 'g.')
# In[16]:
b.run_compute(solver='nm_solver')
_ = b.plot(show=True)
# ### with phase_mask enabled and time-independent hierarchy
# In[17]:
b.set_value('solver_times', 'compute_times')
b.set_value('compute_phases', phoebe.linspace(0,1,101))
b.set_value('mask_enabled', True)
b.set_value('mask_phases', [(-0.1, 0.1), (0.45,0.55)])
b.set_value('dperdt', 0.0)
# In[18]:
dataset_times = b.get_value('times', context='dataset')
_ = plt.plot(times, np.ones_like(times)*1, 'k.')
compute_times = b.get_value('compute_times', context='dataset')
_ = plt.plot(compute_times, np.ones_like(compute_times)*2, 'b.')
solver_times = b.parse_solver_times()
_ = plt.plot(solver_times['lc01'], np.ones_like(solver_times['lc01'])*3, 'g.')
# In[19]:
b.run_compute(solver='nm_solver')
_ = b.plot(show=True)
# ### with phase_mask enabled and time-dependent hierarchy
# In[20]:
b.set_value('solver_times', 'compute_times')
b.set_value('compute_phases', phoebe.linspace(0,1,101))
b.set_value('mask_enabled', True)
b.set_value('mask_phases', [(-0.1, 0.1), (0.45,0.55)])
b.set_value('dperdt', 0.1)
print(b.hierarchy.is_time_dependent())
# In the case where we have a time-dependent system [b.run_solver](../api/phoebe.frontend.bundle.Bundle.run_solver.md) will fail with an error from [b.run_checks_solver](../api/phoebe.frontend.bundle.Bundle.run_checks_solver.md) if `compute_times` does not fully encompass the dataset times.
# In[21]:
print(b.run_checks_solver())
# This will always be the case when `compute_phases` is provided and the dataset times cover more than a single cycle. Here we'll follow the advice from the error and provide `compute_times` instead.
# In[22]:
b.flip_constraint('compute_times', solve_for='compute_phases')
b.set_value('compute_times', phoebe.linspace(0,5,501))
# In[23]:
print(b.run_checks_solver())
# In[24]:
dataset_times = b.get_value('times', context='dataset')
_ = plt.plot(times, np.ones_like(times)*1, 'k.')
compute_times = b.get_value('compute_times', context='dataset')
_ = plt.plot(compute_times, np.ones_like(compute_times)*2, 'b.')
solver_times = b.parse_solver_times()
_ = plt.plot(solver_times['lc01'], np.ones_like(solver_times['lc01'])*3, 'g.')
# In[25]:
b.run_compute(solver='nm_solver')
_ = b.plot(show=True)
# Now we'll just flip the constraint back for the remaining examples
# In[26]:
_ = b.flip_constraint('compute_phases', solve_for='compute_times')
# ## solver_times = 'auto'
#
# `solver_times='auto'` determines the times array under both conditions (`solver_times='times'` and `solver_times='compute_times'`) and ultimately chooses whichever of the two is shorter.
#
# To see this, we'll stick with the no-mask, time-independent case and change the length of `compute_phases` to show the switch to the shorter of the available options.
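# A minimal sketch of that rule (an assumption about the internal logic, shown here
# only for illustration; the actual selection happens inside `b.parse_solver_times`):
#
#     chosen = times if len(times) <= len(compute_times) else compute_times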
# ### compute_times shorter
# In[27]:
b.set_value('solver_times', 'auto')
b.set_value('compute_phases', phoebe.linspace(0,1,101))
b.set_value('mask_enabled', False)
b.set_value('dperdt', 0.0)
# In[28]:
dataset_times = b.get_value('times', context='dataset')
_ = plt.plot(times, np.ones_like(times)*1, 'k.')
compute_times = b.get_value('compute_times', context='dataset')
_ = plt.plot(compute_times, np.ones_like(compute_times)*2, 'b.')
solver_times = b.parse_solver_times()
_ = plt.plot(solver_times['lc01'], np.ones_like(solver_times['lc01'])*3, 'g.')
# In[29]:
b.run_compute(solver='nm_solver')
_ = b.plot(show=True)
# ### times shorter
# In[30]:
b.set_value('solver_times', 'auto')
b.set_value('compute_phases', phoebe.linspace(0,1,2001))
b.set_value('mask_enabled', False)
b.set_value('dperdt', 0.0)
# In[31]:
dataset_times = b.get_value('times', context='dataset')
_ = plt.plot(times, np.ones_like(times)*1, 'k.')
compute_times = b.get_value('compute_times', context='dataset')
_ = plt.plot(compute_times, np.ones_like(compute_times)*2, 'b.')
solver_times = b.parse_solver_times()
_ = plt.plot(solver_times['lc01'], np.ones_like(solver_times['lc01'])*3, 'g.')
# In[32]:
b.run_compute(solver='nm_solver')
_ = b.plot(show=True)
| gpl-3.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/tests/test_delaunay.py | 14 | 7090 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
from matplotlib.cbook import MatplotlibDeprecationWarning
with warnings.catch_warnings():
# the module is deprecated. The tests should be removed when the module is.
warnings.simplefilter('ignore', MatplotlibDeprecationWarning)
from matplotlib.delaunay.triangulate import Triangulation
from matplotlib import pyplot as plt
import matplotlib as mpl
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x*9
y = y*9
x1 = x+1.0
x2 = x-2.0
x4 = x-4.0
x7 = x-7.0
y1 = x+1.0
y2 = y-2.0
y3 = y-3.0
y7 = y-7.0
f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
0.2 * np.exp(-x4*x4 -y7*y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0*(y-x) + 1.0)/9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64-81*((x-0.5)**2 + (y-0.5)**2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0-10.0*x
y = 5.0-10.0*y
g1 = np.exp(-x*x/2)
g2 = np.exp(-y*y/2)
f = g1 + 0.75*g2*(1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0-20.0*x)/3.0)
ey = np.exp((10.0-20.0*y)/3.0)
logitx = 1.0/(1.0+ex)
logity = 1.0/(1.0+ey)
f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 *
(ex-2.0*logitx)*(ey-2.0*logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80*x-40.0, 90*y-45.)
f = np.exp(-0.04*circle) * np.cos(0.15*circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
else:
y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
fig = plt.figure()
plt.hot() # Some like it hot
if plotter == 'imshow':
plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
plt.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
ax = plt.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
plt.title('%s: %s' % (func.title, title))
else:
plt.title(title)
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)
def make_all_2d_testfuncs(allfuncs=allfuncs):
def make_test(func):
filenames = [
'%s-%s' % (func.__name__, x) for x in
['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]
# We only generate PNGs to save disk space -- we just assume
# that any backend differences are caught by other tests.
@image_comparison(filenames, extensions=['png'],
freetype_version=('2.4.5', '2.4.9'),
remove_text=True)
def reference_test():
nnt.plot(func, interp=False, plotter='imshow')
nnt.plot(func, interp=True, plotter='imshow')
lpt.plot(func, interp=True, plotter='imshow')
nnt.plot(func, interp=False, plotter='contour')
nnt.plot(func, interp=True, plotter='contour')
lpt.plot(func, interp=True, plotter='contour')
tester = reference_test
tester.__name__ = str('test_%s' % func.__name__)
return tester
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
globals()['test_%s' % func.__name__] = make_test(func)
make_all_2d_testfuncs()
# 1d and 0d grid tests
ref_interpolator = Triangulation([0,10,10,0],
[0,0,10,10]).linear_interpolator([1,10,5,2.0])
def test_1d_grid():
res = ref_interpolator[3:6:2j,1:1:1j]
assert np.allclose(res, [[1.6],[1.9]], rtol=0)
def test_0d_grid():
res = ref_interpolator[3:3:1j,1:1:1j]
assert np.allclose(res, [[1.6]], rtol=0)
@image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png'])
def test_1d_plots():
x_range = slice(0.25,9.75,20j)
x = np.mgrid[x_range]
ax = plt.gca()
for y in xrange(2,10,2):
plt.plot(x, ref_interpolator[x_range,y:y:1j])
ax.set_xticks([])
ax.set_yticks([])
| mit |
TomAugspurger/pandas | pandas/tests/series/methods/test_to_timestamp.py | 1 | 2642 | from datetime import timedelta
import pytest
from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
import pandas._testing as tm
class TestToTimestamp:
def test_to_timestamp(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
result = series.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
assert result.name == "foo"
exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
result = series.to_timestamp(how="start")
tm.assert_index_equal(result.index, exp_index)
def _get_with_delta(delta, freq="A-DEC"):
return date_range(
to_datetime("1/1/2001") + delta,
to_datetime("12/31/2009") + delta,
freq=freq,
)
delta = timedelta(hours=23)
result = series.to_timestamp("H", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp("T", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
result = series.to_timestamp("S", "end")
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
result = series.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
assert result.name == "foo"
def test_to_timestamp_raises(self, indices):
# https://github.com/pandas-dev/pandas/issues/33327
index = indices
ser = Series(index=index, dtype=object)
if not isinstance(index, PeriodIndex):
msg = f"unsupported Type {type(index).__name__}"
with pytest.raises(TypeError, match=msg):
ser.to_timestamp()
| bsd-3-clause |
pytroll/pyspectral | setup.py | 2 | 3947 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2020 Pytroll
# Author(s):
# Adam Dybbroe <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from setuptools import setup, find_packages
import os.path
try:
# HACK: https://github.com/pypa/setuptools_scm/issues/190#issuecomment-351181286
# Stop setuptools_scm from including all repository files
import setuptools_scm.integration
setuptools_scm.integration.find_files = lambda _: []
except ImportError:
pass
description = ('Reading and manipulating satellite sensor spectral responses and the '
               'solar spectrum, to perform various corrections to VIS and NIR band data')
try:
with open('./README', 'r') as fd:
long_description = fd.read()
except IOError:
long_description = ''
requires = ['docutils>=0.3', 'numpy>=1.5.1', 'scipy>=0.14',
'python-geotiepoints>=1.1.1',
'h5py>=2.5', 'requests', 'six', 'pyyaml',
'appdirs']
dask_extra = ['dask[array]']
test_requires = ['pyyaml', 'dask[array]', 'xlrd', 'pytest', 'xarray']
if sys.version < '3.0':
test_requires.append('mock')
try:
# This is needed in order to let the unittests pass
# without complaining at the end on certain systems
import multiprocessing
except ImportError:
pass
NAME = 'pyspectral'
setup(name=NAME,
description=description,
author='Adam Dybbroe',
author_email='[email protected]',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 ' +
'or later (GPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering'],
url='https://github.com/pytroll/pyspectral',
long_description=long_description,
license='GPLv3',
packages=find_packages(),
include_package_data=True,
package_data={
# If any package contains *.txt files, include them:
'': ['*.txt', '*.det'],
'pyspectral': [os.path.join('etc', 'pyspectral.yaml'),
os.path.join('data', '*.dat'),
os.path.join('data', '*.XLS'),
'data/modis/terra/Reference_RSR_Dataset/*.det'],
},
# Project should use reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
install_requires=requires,
extras_require={'xlrd': ['xlrd'], 'trollsift': ['trollsift'],
'matplotlib': ['matplotlib'],
'pandas': ['pandas'],
'tqdm': ['tqdm'],
'dask': dask_extra},
scripts=['bin/plot_rsr.py', 'bin/composite_rsr_plot.py',
'bin/download_atm_correction_luts.py',
'bin/download_rsr.py'],
data_files=[('share', ['pyspectral/data/e490_00a.dat',
'pyspectral/data/MSG_SEVIRI_Spectral_Response_Characterisation.XLS'])],
test_suite='pyspectral.tests.suite',
tests_require=test_requires,
python_requires='>=3.7',
zip_safe=False,
use_scm_version=True
)
| gpl-3.0 |
juhi24/baecc | scripts/2016paper/scr_d0-rho_combined.py | 1 | 4672 | # -*- coding: utf-8 -*-
"""
@author: Jussi Tiira
"""
import snowfall as sf
import read
import numpy as np
import matplotlib.pyplot as plt
from os import path
import fit
from scipy.special import gamma
from scr_snowfall import param_table
debug = False
savepath = '../results/pip2015'
d0_col = 'D_0_gamma'
#plt.close('all')
plt.ioff()
fit_scale = [read.RHO_SCALE,1]
# dry and wet snows, from original
magono65 = fit.PolFit(params=[20, -2])
# "Fit of data from Magano and Nakamura (1965) for dry snowflakes", fit by Holroyd
magono65dry = fit.PolFit(params=[22, -1.5])
holroyd71 = fit.PolFit(params=[170, -1]) # from Brandes
# "Used by Schaller et al. (1982) for brightband modeling" from Fabry
schaller82 = fit.PolFit(params=[64, -0.65])
# from original
muramoto95 = fit.PolFit(params=[48, -0.406])
# "Measurements from Switzerland by Barthazy (1997, personal communication)" from Fabry
barthazy97 = fit.PolFit(params=[18, -0.8])
# from Brandes
fabry99 = fit.PolFit(params=[150, -1])
# from Brandes
heymsfield04 = fit.PolFit(params=[104, -0.95])
brandes07 = fit.PolFit(params=[178, -0.922])
color1 = 'gray'
color2 = 'red'
fits_to_plot = {#magono65: {'color':color1, 'linestyle':'--', 'label':'Magono and Nakamura (1965)'},
#magono65dry: {'label':'Magono and Nakamura (1965), dry snow'}
#holroyd71: {'color':color1, 'linestyle':':', 'label':'Holroyd (1971)'},
#schaller82: {'color':color1, 'linestyle':'-.', 'label':'Schaller et al. (1982)'},
#muramoto95: {'color':color1, 'linestyle':'-', 'label':'Muramoto et al. (1995)'},
#barthazy97: {'color':color, linestyle:'--', 'label':'Barthazy (1997)'},
#fabry99: {'color':color2, 'linestyle':'-', 'label':'Fabry and Szyrmer (1999)'},
#heymsfield04: {'color':color2, 'linestyle':'--', 'label':'Heymsfield et al. (2004)'},
brandes07: {'color':'black', 'linestyle':'--', 'label':'Brandes et al. (2007)'}}
def plot_density_histogram(data, bins=60, **kws):
ax = data.density.hist(bins=bins, **kws)
read.rho_scale(ax.xaxis)
ax.set_xlabel('bulk density')
ax.set_ylabel('frequency')
def prep_d0_rho(data):
rho_d0 = fit.PolFit(x=data[d0_col], y=data.density,
sigma=1/data['count'], xname='D_0',
disp_scale=fit_scale)
rho_d0.find_fit(loglog=True)
return rho_d0
def prepare_d0_rho(data):
rho_d0_cols = ['density',d0_col, 'count']
rho_d0_data = data.loc[:, rho_d0_cols].dropna()
rho_d0 = prep_d0_rho(rho_d0_data)
rho_d0_baecc = prep_d0_rho(rho_d0_data.loc['first'])
rho_d0_1415 = prep_d0_rho(rho_d0_data.loc['second'])
return rho_d0, rho_d0_baecc, rho_d0_1415
def plot_d0_rho(data):
plotkws = {'x': d0_col,
'y': 'density',
'sizecol': 'count',
#'groupby': 'case',
#'c': 'case',
'scale': 0.1,
'colorbar': False,
'xlim': [0.5,6],
'ylim': [0,450],
'alpha': 0.5}
ax = sf.plot_pairs(data.loc['first'], c=(.6, .6 , .92, .8), label='BAECC', **plotkws)
sf.plot_pairs(data.loc['second'], c=(.2, .92, .2, .8), ax=ax, label='winter 2014-2015',
**plotkws)
plt.tight_layout()
rho_d0, rho_d0_baecc, rho_d0_1415 = prepare_d0_rho(data)
rho_d0_baecc.plot(ax=ax)
rho_d0_1415.plot(ax=ax)
rho_d0.plot(ax=ax, color='black', label='all cases: $%s$' % str(rho_d0))
for key, kws in fits_to_plot.items():
key.plot(ax=ax, **kws)
read.rho_scale(ax.yaxis)
ax.set_ylabel('$\\rho$, ' + read.RHO_UNITS)
ax.set_xlabel('$D_0$, mm')
ax.set_xticks((0.5, 1, 2, 3, 4, 5, 6))
plt.legend()
return ax, rho_d0, rho_d0_baecc, rho_d0_1415
def mass_dim(rho_d0, b_v=0.2):
a_d0, b_d0 = rho_d0.params
a_d0 = a_d0/1000*10**b_d0
beta = b_d0 + 3
alpha = np.pi/6*3.67**b_d0*a_d0*gamma(b_v+4)/gamma(b_v+b_d0+4)
return fit.PolFit(params=(alpha, beta), xname='D')
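# A rough sanity check of mass_dim (hypothetical numbers, not results from this study):
# with rho(D_0) = a_d0 * D_0**b_d0 the conversion yields m(D) = alpha * D**beta with
# beta = b_d0 + 3 and alpha rescaled through the gamma-function ratio for an assumed
# velocity exponent b_v.
#
#     example_rho_d0 = fit.PolFit(params=[178, -0.922])  # Brandes et al. (2007) values
#     example_m_d = mass_dim(example_rho_d0, b_v=0.2)
#     # example_m_d.params == (alpha, beta) with beta = -0.922 + 3 = 2.078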
data_fltr = param_table(debug=debug)
figkws = {'dpi': 150, 'figsize': (5,5)}
#fig = plt.figure(**figkws)
#ax = plot_d0_rho(data)
fig_fltr = plt.figure(**figkws)
ax_fltr, rho_d0, rho_d0_baecc, rho_d0_1415 = plot_d0_rho(data_fltr)
m_d = mass_dim(rho_d0)
m_d_baecc = mass_dim(rho_d0_baecc)
m_d_1415 = mass_dim(rho_d0_1415)
if debug:
savepath += '/test'
paperpath = path.join(savepath, 'paper')
read.ensure_dir(paperpath)
#fig.savefig(path.join(savepath, 'rho_d0_combined.eps'))
fig_fltr.savefig(path.join(savepath, 'rho_d0_combined_d0fltr.eps'))
fig_fltr.savefig(path.join(paperpath, 'd0_rho.eps'))
fig_fltr.savefig(path.join(paperpath, 'd0_rho.png')) | gpl-3.0 |
glouppe/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 13 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
Vishluck/sympy | sympy/interactive/session.py | 43 | 15119 | """Tools for setting up interactive sessions. """
from __future__ import print_function, division
from distutils.version import LooseVersion as V
from sympy.core.compatibility import range
from sympy.external import import_module
from sympy.interactive.printing import init_printing
preexec_source = """\
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
init_printing()
"""
verbose_message = """\
These commands were executed:
%(source)s
Documentation can be found at http://docs.sympy.org/%(version)s
"""
no_ipython = """\
Couldn't locate IPython. Having IPython installed is greatly recommended.
See http://ipython.scipy.org for more details. If you use Debian/Ubuntu,
just install the 'ipython' package and start isympy again.
"""
def _make_message(ipython=True, quiet=False, source=None):
"""Create a banner for an interactive session. """
from sympy import __version__ as sympy_version
from sympy.polys.domains import GROUND_TYPES
from sympy.utilities.misc import ARCH
from sympy import SYMPY_DEBUG
import sys
import os
python_version = "%d.%d.%d" % sys.version_info[:3]
if ipython:
shell_name = "IPython"
else:
shell_name = "Python"
info = ['ground types: %s' % GROUND_TYPES]
cache = os.getenv('SYMPY_USE_CACHE')
if cache is not None and cache.lower() == 'no':
info.append('cache: off')
if SYMPY_DEBUG:
info.append('debugging: on')
args = shell_name, sympy_version, python_version, ARCH, ', '.join(info)
message = "%s console for SymPy %s (Python %s-%s) (%s)\n" % args
if not quiet:
if source is None:
source = preexec_source
_source = ""
for line in source.split('\n')[:-1]:
if not line:
_source += '\n'
else:
_source += '>>> ' + line + '\n'
doc_version = sympy_version
if 'dev' in doc_version:
doc_version = "dev"
else:
doc_version = "%s.%s.%s/" % tuple(doc_version.split('.')[:3])
message += '\n' + verbose_message % {'source': _source,
'version': doc_version}
return message
def int_to_Integer(s):
"""
Wrap integer literals with Integer.
This is based on the decistmt example from
http://docs.python.org/library/tokenize.html.
Only integer literals are converted. Float literals are left alone.
Examples
========
>>> from __future__ import division
>>> from sympy.interactive.session import int_to_Integer
>>> from sympy import Integer
>>> s = '1.2 + 1/2 - 0x12 + a1'
>>> int_to_Integer(s)
'1.2 +Integer (1 )/Integer (2 )-Integer (0x12 )+a1 '
>>> s = 'print (1/2)'
>>> int_to_Integer(s)
'print (Integer (1 )/Integer (2 ))'
>>> exec(s)
0.5
>>> exec(int_to_Integer(s))
1/2
"""
from tokenize import generate_tokens, untokenize, NUMBER, NAME, OP
from sympy.core.compatibility import StringIO
def _is_int(num):
"""
Returns true if string value num (with token NUMBER) represents an integer.
"""
# XXX: Is there something in the standard library that will do this?
if '.' in num or 'j' in num.lower() or 'e' in num.lower():
return False
return True
result = []
g = generate_tokens(StringIO(s).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and _is_int(tokval): # replace NUMBER tokens
result.extend([
(NAME, 'Integer'),
(OP, '('),
(NUMBER, tokval),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result)
def enable_automatic_int_sympification(app):
"""
Allow IPython to automatically convert integer literals to Integer.
"""
hasshell = hasattr(app, 'shell')
import ast
if hasshell:
old_run_cell = app.shell.run_cell
else:
old_run_cell = app.run_cell
def my_run_cell(cell, *args, **kwargs):
try:
# Check the cell for syntax errors. This way, the syntax error
# will show the original input, not the transformed input. The
# downside here is that IPython magic like %timeit will not work
# with transformed input (but on the other hand, IPython magic
# that doesn't expect transformed input will continue to work).
ast.parse(cell)
except SyntaxError:
pass
else:
cell = int_to_Integer(cell)
old_run_cell(cell, *args, **kwargs)
if hasshell:
app.shell.run_cell = my_run_cell
else:
app.run_cell = my_run_cell
def enable_automatic_symbols(app):
"""Allow IPython to automatially create symbols (``isympy -a``). """
# XXX: This should perhaps use tokenize, like int_to_Integer() above.
# This would avoid re-executing the code, which can lead to subtle
# issues. For example:
#
# In [1]: a = 1
#
# In [2]: for i in range(10):
# ...: a += 1
# ...:
#
# In [3]: a
# Out[3]: 11
#
# In [4]: a = 1
#
# In [5]: for i in range(10):
# ...: a += 1
# ...: print b
# ...:
# b
# b
# b
# b
# b
# b
# b
# b
# b
# b
#
# In [6]: a
# Out[6]: 12
#
# Note how the for loop is executed again because `b` was not defined, but `a`
# was already incremented once, so the result is that it is incremented
# multiple times.
import re
re_nameerror = re.compile(
"name '(?P<symbol>[A-Za-z_][A-Za-z0-9_]*)' is not defined")
def _handler(self, etype, value, tb, tb_offset=None):
"""Handle :exc:`NameError` exception and allow injection of missing symbols. """
if etype is NameError and tb.tb_next and not tb.tb_next.tb_next:
match = re_nameerror.match(str(value))
if match is not None:
# XXX: Make sure Symbol is in scope. Otherwise you'll get infinite recursion.
self.run_cell("%(symbol)s = Symbol('%(symbol)s')" %
{'symbol': match.group("symbol")}, store_history=False)
try:
code = self.user_ns['In'][-1]
except (KeyError, IndexError):
pass
else:
self.run_cell(code, store_history=False)
return None
finally:
self.run_cell("del %s" % match.group("symbol"),
store_history=False)
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if hasattr(app, 'shell'):
app.shell.set_custom_exc((NameError,), _handler)
else:
# This was restructured in IPython 0.13
app.set_custom_exc((NameError,), _handler)
def init_ipython_session(argv=[], auto_symbols=False, auto_int_to_Integer=False):
"""Construct new IPython session. """
import IPython
if V(IPython.__version__) >= '0.11':
# use an app to parse the command line, and init config
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal import ipapp
else:
from IPython.frontend.terminal import ipapp
app = ipapp.TerminalIPythonApp()
# don't draw IPython banner during initialization:
app.display_banner = False
app.initialize(argv)
if auto_symbols:
readline = import_module("readline")
if readline:
enable_automatic_symbols(app)
if auto_int_to_Integer:
enable_automatic_int_sympification(app)
return app.shell
else:
from IPython.Shell import make_IPython
return make_IPython(argv)
def init_python_session():
"""Construct new Python session. """
from code import InteractiveConsole
class SymPyConsole(InteractiveConsole):
"""An interactive console with readline support. """
def __init__(self):
InteractiveConsole.__init__(self)
try:
import readline
except ImportError:
pass
else:
import os
import atexit
readline.parse_and_bind('tab: complete')
if hasattr(readline, 'read_history_file'):
history = os.path.expanduser('~/.sympy-history')
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
return SymPyConsole()
def init_session(ipython=None, pretty_print=True, order=None,
use_unicode=None, use_latex=None, quiet=False, auto_symbols=False,
auto_int_to_Integer=False, argv=[]):
"""
Initialize an embedded IPython or Python session. The IPython session is
initiated with the --pylab option, without the numpy imports, so that
matplotlib plotting can be interactive.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify;
if False, use sstrrepr to stringify.
order: string or None
There are a few different settings for this parameter:
lex (default), which is lexicographic order;
grlex, which is graded lexicographic order;
grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: boolean or None
If True, use latex rendering in IPython GUIs;
if False, do not use latex rendering.
quiet: boolean
If True, init_session will not print messages regarding its status;
if False, init_session will print messages regarding its status.
auto_symbols: boolean
If True, IPython will automatically create symbols for you.
If False, it will not.
The default is False.
auto_int_to_Integer: boolean
If True, IPython will automatically wrap int literals with Integer, so
that things like 1/2 give Rational(1, 2).
If False, it will not.
The default is False.
ipython: boolean or None
If True, printing will initialize for an IPython console;
if False, printing will initialize for a normal console;
The default is None, which automatically determines whether we are in
an ipython instance or not.
argv: list of arguments for IPython
See sympy.bin.isympy for options that can be used to initialize IPython.
See Also
========
sympy.interactive.printing.init_printing: for examples and the rest of the parameters.
Examples
========
>>> from sympy import init_session, Symbol, sin, sqrt
>>> sin(x) #doctest: +SKIP
NameError: name 'x' is not defined
>>> init_session() #doctest: +SKIP
>>> sin(x) #doctest: +SKIP
sin(x)
>>> sqrt(5) #doctest: +SKIP
___
\/ 5
>>> init_session(pretty_print=False) #doctest: +SKIP
>>> sqrt(5) #doctest: +SKIP
sqrt(5)
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + x + y**2 + y
>>> init_session(order='grlex') #doctest: +SKIP
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + y**2 + x + y
>>> init_session(order='grevlex') #doctest: +SKIP
>>> y * x**2 + x * y**2 #doctest: +SKIP
x**2*y + x*y**2
>>> init_session(order='old') #doctest: +SKIP
>>> x**2 + y**2 + x + y #doctest: +SKIP
x + y + x**2 + y**2
>>> theta = Symbol('theta') #doctest: +SKIP
>>> theta #doctest: +SKIP
theta
>>> init_session(use_unicode=True) #doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
"""
import sys
in_ipython = False
if ipython is not False:
try:
import IPython
except ImportError:
if ipython is True:
raise RuntimeError("IPython is not available on this system")
ip = None
else:
if V(IPython.__version__) >= '0.11':
try:
ip = get_ipython()
except NameError:
ip = None
else:
ip = IPython.ipapi.get()
if ip:
ip = ip.IP
in_ipython = bool(ip)
if ipython is None:
ipython = in_ipython
if ipython is False:
ip = init_python_session()
mainloop = ip.interact
else:
if ip is None:
ip = init_ipython_session(argv=argv, auto_symbols=auto_symbols,
auto_int_to_Integer=auto_int_to_Integer)
if V(IPython.__version__) >= '0.11':
# runsource is gone, use run_cell instead, which doesn't
# take a symbol arg. The second arg is `store_history`,
# and False means don't add the line to IPython's history.
ip.runsource = lambda src, symbol='exec': ip.run_cell(src, False)
#Enable interactive plotting using pylab.
try:
ip.enable_pylab(import_all=False)
except Exception:
# Causes an import error if matplotlib is not installed.
# Causes other errors (depending on the backend) if there
# is no display, or if there is some problem in the
# backend, so we have a bare "except Exception" here
pass
if not in_ipython:
mainloop = ip.mainloop
readline = import_module("readline")
if auto_symbols and (not ipython or V(IPython.__version__) < '0.11' or not readline):
raise RuntimeError("automatic construction of symbols is possible only in IPython 0.11 or above with readline support")
if auto_int_to_Integer and (not ipython or V(IPython.__version__) < '0.11'):
raise RuntimeError("automatic int to Integer transformation is possible only in IPython 0.11 or above")
_preexec_source = preexec_source
ip.runsource(_preexec_source, symbol='exec')
init_printing(pretty_print=pretty_print, order=order,
use_unicode=use_unicode, use_latex=use_latex, ip=ip)
message = _make_message(ipython, quiet, _preexec_source)
if not in_ipython:
mainloop(message)
sys.exit('Exiting ...')
else:
ip.write(message)
import atexit
atexit.register(lambda ip: ip.write("Exiting ...\n"), ip)
| bsd-3-clause |
LunarLanding/Pythics | pythics/mpl.py | 1 | 70449 | # -*- coding: utf-8 -*-
#
# Copyright 2008 - 2014 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import multiprocessing
import numpy as np
#from PyQt4 import QtCore, QtGui
from pythics.settings import _TRY_PYSIDE
try:
if not _TRY_PYSIDE:
raise ImportError()
import PySide.QtCore as _QtCore
import PySide.QtGui as _QtGui
QtCore = _QtCore
QtGui = _QtGui
USES_PYSIDE = True
except ImportError:
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
import PyQt4.QtCore as _QtCore
import PyQt4.QtGui as _QtGui
QtCore = _QtCore
QtGui = _QtGui
USES_PYSIDE = False
import matplotlib
if USES_PYSIDE:
matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as Toolbar
# the following import is necessary for 3-D plots in matplotlib although it is
# not directly used
from mpl_toolkits.mplot3d import Axes3D
import pythics.lib
import pythics.libcontrol
class Canvas(pythics.libcontrol.MPLControl):
"""Gives essentially complete acess to the matplotlib object oriented (OO)
API. Use this control when Plot2D and Chart2D don't give all the features
you need. All interaction with this control, except configuring the
callbacks, is done through the following three attributes:
- mpl.Canvas.figure: the matplotlib Figure
- mpl.Canvas.canvas: The matplotlib FigureCanvas.
- mpl.Canvas.toolbar: The matplotlib Toolbar (NavigationToolbar2QTAgg).
If you need access to the matplotlib library, use the Import control.
See the examples and matplotlib documentation for details. Configuration
of the callback functions (if needed) can be done through html parameters
or python attributes.
HTML parameters:
*toolbar*: [ *True* (default) | *False* ]
Whether to add a matplotlib toolbar below the plot.
*actions*: dict
a dictionary of key:value pairs where the key is the name of a signal
and the value is the function to run when the signal is emitted
actions in this control:
======================= ============================================
signal when emitted
======================= ============================================
'button_press_event' a mouse button is pressed
'button_release_event' a mouse button is released
'draw_event' canvas is redrawn
'key_press_event' a key is pressed
'key_release_event' a key is released
'motion_notify_event' the mouse is moved
'pick_event' an object in the canvas is selected
'resize_event' the figure canvas is resized
'scroll_event' the mouse scroll wheel is rolled
'figure_enter_event' the mouse enters a new figure
'figure_leave_event' the mouse leaves a figure
'axes_enter_event' the mouse enters a new axes
'axes_leave_event'       the mouse leaves an axes
======================= ============================================
"""
def __init__(self, parent, toolbar=True, **kwargs):
pythics.libcontrol.MPLControl.__init__(self, parent, **kwargs)
self._widget = QtGui.QFrame()
vbox = QtGui.QVBoxLayout()
# plot
self.figure = matplotlib.figure.Figure()
self.canvas = FigureCanvas(self.figure)
vbox.addWidget(self.canvas)
# toolbar
if toolbar:
self.toolbar = Toolbar(self.canvas, self._widget)
vbox.addWidget(self.toolbar)
self._mpl_widget = self.canvas
self._widget.setLayout(vbox)
#
# Plot2D: plot panel with multiple plot types
#
class Plot2D(pythics.libcontrol.MPLControl):
"""An easy to use plotting control for 2-dimensional plotting.
Right click on the plot to save an image of the plot to a file.
HTML parameters:
*projection*: [ 'cartesian' (default) | 'polar' ]
Set to polar for polar plots (not all plot items supported).
*actions*: dict
a dictionary of key:value pairs where the key is the name of a signal
and the value is the function to run when the signal is emitted
actions in this control:
======================= ============================================
signal when emitted
======================= ============================================
'button_press_event' a mouse button is pressed
'button_release_event' a mouse button is released
'draw_event' canvas is redrawn
'key_press_event' a key is pressed
'key_release_event' a key is released
'motion_notify_event' the mouse is moved
'pick_event' an object in the canvas is selected
'resize_event' the figure canvas is resized
'scroll_event' the mouse scroll wheel is rolled
'figure_enter_event' the mouse enters a new figure
'figure_leave_event' the mouse leaves a figure
'axes_enter_event' the mouse enters a new axes
'axes_leave_event'       the mouse leaves an axes
======================= ============================================
"""
def __init__(self, parent, projection='cartesian', **kwargs):
pythics.libcontrol.MPLControl.__init__(self, parent, **kwargs)
self._animated = False
self._animated_artists = list()
self._x_autoscale = True
self._y_autoscale = True
self._tight_autoscale = False
# dictionary of plot objects such as lines, points, etc.
self._items = dict()
# use modified matplotlib canvas to redraw correctly on resize
self._figure = matplotlib.figure.Figure()
self._canvas = PythicsMPLCanvas(self, self._figure)
self._widget = self._canvas
self._mpl_widget = self._canvas
if projection == 'polar':
self._axes = self._figure.add_subplot(111, polar=True)
self._polar = True
else:
self._axes = self._figure.add_subplot(111)
self._polar = False
# set_tight_layout doesn't seem to exist, so set tight_layout directly
#self._figure.set_tight_layout(True)
self._figure.tight_layout()
# set plot parameters from parameters passed in html
self._plot_properties = dict()
self.set_plot_properties(x_limits='auto',
y_limits='auto',
tight_autoscale=False,
x_scale='linear',
y_scale='linear',
aspect_ratio='auto',
dpi=150)
# should have resize event handler to redraw correctly,
# but it doesn't seem to work - instead we use custom plot canvas
#self._canvas.mpl_connect('resize_event', self._resize)
def _resize(self, event):
if self._animated:
self._full_animated_redraw()
else:
self._figure.tight_layout()
def _redraw(self):
# blit entire figure after tab change to eliminate drawing artifacts
if self._animated:
self._canvas.blit(self._figure.bbox)
def _update(self, rescale='auto'):
if self._animated:
if rescale == 'auto':
if self._x_autoscale or self._y_autoscale:
self._full_animated_redraw()
else:
self._fast_animated_redraw()
elif rescale == True:
self._full_animated_redraw()
else:
self._fast_animated_redraw()
else:
if rescale == 'auto':
if self._x_autoscale or self._y_autoscale:
self._axes.relim()
self._axes.autoscale_view(self._tight_autoscale,
self._x_autoscale, self._y_autoscale)
self._figure.tight_layout()
self._canvas.draw()
else:
self._canvas.draw()
elif rescale == True:
self._axes.relim()
self._axes.autoscale_view(self._tight_autoscale,
self._x_autoscale, self._y_autoscale)
self._figure.tight_layout()
self._canvas.draw()
else:
self._canvas.draw()
def _fast_animated_redraw(self):
self._canvas.restore_region(self._animated_background)
for k in self._animated_artists:
self._axes.draw_artist(self._items[k]['mpl_item'])
# just redraw the region within the axes
self._canvas.blit(self._axes.bbox)
def _full_animated_redraw(self):
self._axes.relim()
self._axes.autoscale_view(self._tight_autoscale,
self._x_autoscale, self._y_autoscale)
self._figure.tight_layout()
self._canvas.draw()
# update animation background artists
self._animated_background = self._canvas.copy_from_bbox(self._axes.bbox)
for k in self._animated_artists:
self._axes.draw_artist(self._items[k]['mpl_item'])
# blit entire canvas to ensure complete update
self._canvas.blit(self._figure.bbox)
#---------------------------------------------------
# methods below used only for access by action proxy
def clear(self, redraw=True, rescale='auto'):
"""Delete all plot items to clear the plot.
Optional keyword arguments:
*redraw*: [ *True* (default) | *False* ]
Whether to redraw the plot after applying changes.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
self._axes.clear()
self._items = dict()
self._animated_artists = list()
self._animated = False
if redraw:
self._update(rescale)
def new_curve(self, key, memory='array', length=1000, **kwargs):
"""Create a new curve or set of points on the plot.
Arguments:
*key*: str
The name you give to this plot item for future access.
Optional keyword arguments:
*memory*: [ 'array' (default) | 'circular' | 'growable' ]
Format for plot item data storage which determines how future updates
to the data can be made.
*length*: int
if *memory* == 'circular': The number of elements in the circular array.
if *memory* == 'growable': The initial number of elements in the array.
*animated*: [ *True* | *False* (default) ]
If *True*, try to redraw this item without redrawing the whole plot
whenever it is updated. This is generally faster if the axes do not
need to be rescaled, and thus is recommended for plot items that
are changed frequently.
*alpha*: ``0 <= scalar <= 1``
The alpha value for the curve. 0.0 is transparent and 1.0 is opaque.
*line_color*: any valid color, see more information below
The color used for drawing lines between points.
*line_style*: [ '-' | '--' | '-.' | ':' | '' ]
The following format string characters are accepted to control
the line style:
================ ===============================
character description
================ ===============================
``'-'`` solid line style (default)
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
``''`` no line
================ ===============================
*line_width*: float value in points
The width of lines between points.
*marker_color*: any valid color, see more information below
The fill color of markers drawn at the specified points.
*marker_edge_color*: any valid color, see more information below
The color of the edges of markers or of the whole marker if the
marker consists of lines only.
*marker_edge_width*: float value in points
The width of the edges of markers or of the lines if the marker
consists of lines only.
*marker_style*: any valid marker style, see table below
The shape of the markers drawn.
The following format string characters are accepted to control
the marker style:
============================== ===============================
Value Description
============================== ===============================
``''`` no marker (default)
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
(*numsides*, *style*, *angle*) see below
============================== ===============================
The marker can also be a tuple (*numsides*, *style*, *angle*),
which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (*numsides* and *angle* is ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
*marker_width*: float value in points
The overall size of the markers draw at the data points.
Colors:
The following color abbreviations are supported:
===== =======
Value Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
In addition, you can specify colors in many other ways, including
full names (``'green'``), hex strings (``'#008000'``), RGB or
RGBA tuples (``(0,1,0,1)``) or grayscale intensities as a string (``'0.8'``).
"""
plot_kwargs = dict()
if 'alpha' in kwargs:
value = kwargs.pop('alpha')
plot_kwargs['alpha'] = value
if 'line_color' in kwargs:
value = kwargs.pop('line_color')
plot_kwargs['color'] = value
if 'line_style' in kwargs:
value = kwargs.pop('line_style')
plot_kwargs['linestyle'] = value
if 'line_width' in kwargs:
value = kwargs.pop('line_width')
plot_kwargs['linewidth'] = value
if 'marker_color' in kwargs:
value = kwargs.pop('marker_color')
plot_kwargs['markerfacecolor'] = value
if 'marker_edge_color' in kwargs:
value = kwargs.pop('marker_edge_color')
plot_kwargs['markeredgecolor'] = value
if 'marker_edge_width' in kwargs:
value = kwargs.pop('marker_edge_width')
plot_kwargs['markeredgewidth'] = value
if 'marker_style' in kwargs:
value = kwargs.pop('marker_style')
plot_kwargs['marker'] = value
if 'marker_width' in kwargs:
value = kwargs.pop('marker_width')
plot_kwargs['markersize'] = value
# check for an old plot item of the same name
if key in self._items:
item = self._items.pop(key)
item['mpl_item'].remove()
# create the plot item
if memory == 'circular':
data = pythics.lib.CircularArray(cols=2, length=length)
elif memory == 'growable':
data = pythics.lib.GrowableArray(cols=2, length=length)
else:
data = np.array([])
if ('animated' in kwargs) and kwargs.pop('animated'):
self._animated = True
item, = self._axes.plot(np.array([]), np.array([]), animated=True,
label=key, **plot_kwargs)
if len(self._animated_artists) == 0:
# this is the first animated artist, so we need to set up
self._animated_background = self._canvas.copy_from_bbox(self._axes.bbox)
self._animated_artists.append(key)
else:
item, = self._axes.plot(np.array([]), np.array([]), label=key,
**plot_kwargs)
self._items[key] = dict(item_type='curve', mpl_item=item, data=data,
memory=memory)
if len(kwargs) != 0:
logger = multiprocessing.get_logger()
logger.warning("Unused arguments in 'new_curve': %s."
% str(kwargs))
def new_image(self, key, **kwargs):
"""Create a new image item on the plot.
Arguments:
*key*: str
The name you give to this plot item for future access.
Optional keyword arguments:
*animated*: [ *True* | *False* (default) ]
If *True*, try to redraw this item without redrawing the whole plot
whenever it is updated. This is generally faster if the axes do not
need to be rescaled, and thus is recommended for plot items that
are changed frequently.
*alpha*: ``0 <= scalar <= 1``
The alpha value for the image. 0.0 is transparent and 1.0 is opaque.
*extent*: [ *None* (default) | scalars (left, right, bottom, top) ]
Data limits for the axes. The default assigns zero-based row,
column indices to the x, y centers of the pixels.
*interpolation*: str
Acceptable values are 'none', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos'
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
*colormap*: str
The name of a matplotlib colormap for mapping the data value to the
displayed color at each point.
*colormap* is ignored when *data* has RGB(A) information
*c_limits*: [ 'auto' (default) | scalars (vmin, vmax) ]
Data limits for the colormap.
"""
# create a new dictionary of options for plotting
plot_kwargs = dict()
if 'alpha' in kwargs:
value = kwargs.pop('alpha')
plot_kwargs['alpha'] = value
if 'extent' in kwargs:
value = kwargs.pop('extent')
plot_kwargs['extent'] = value
if 'interpolation' in kwargs:
value = kwargs.pop('interpolation')
plot_kwargs['interpolation'] = value
if 'origin' in kwargs:
value = kwargs.pop('origin')
plot_kwargs['origin'] = value
if 'colormap' in kwargs:
value = kwargs.pop('colormap')
plot_kwargs['cmap'] = value
if 'c_limits' in kwargs:
value = kwargs.pop('c_limits')
if value != 'auto':
plot_kwargs['vmin'] = value[0]
plot_kwargs['vmax'] = value[1]
# check for an old plot item of the same name
if key in self._items:
item = self._items.pop(key)
item['mpl_item'].remove()
# create the plot item
data = np.array([[0]])
#if self._polar:
# # THIS DOESN'T WORK??
if ('animated' in kwargs) and kwargs.pop('animated'):
self._animated = True
item = self._axes.imshow(data, animated=True, label=key,
aspect=self._plot_properties['aspect_ratio'],
**plot_kwargs)
if len(self._animated_artists) == 0:
# this is the first animated artist, so we need to set up
self._animated_background = self._canvas.copy_from_bbox(self._axes.bbox)
self._animated_artists.append(key)
else:
item = self._axes.imshow(data, label=key,
aspect=self._plot_properties['aspect_ratio'],
**plot_kwargs)
self._items[key] = dict(item_type='image', mpl_item=item, shape=(1, 1))
if len(kwargs) != 0:
logger = multiprocessing.get_logger()
logger.warning("Unused arguments in 'new_image': %s." % str(kwargs))
def new_colormesh(self, key, X, Y, **kwargs):
"""Create a new pseudocolor mesh item on the plot.
Arguments:
*key*: str
The name you give to this plot item for future access.
*X*: 2-D numpy array
The x coordinates of the colored quadrilaterals.
*numpy.meshgrid()* may be helpful for making this.
*Y*: 2-D numpy array
The y coordinates of the colored quadrilaterals.
*numpy.meshgrid()* may be helpful for making this.
Optional keyword arguments:
*animated*: [ *True* | *False* (default) ]
If *True*, try to redraw this item without redrawing the whole plot
whenever it is updated. This is generally faster if the axes do not
need to be rescaled, and thus is recommended for plot items that
are changed frequently.
*alpha*: ``0 <= scalar <= 1``
The alpha value for the image. 0.0 is transparent and 1.0 is opaque.
*extent*: [ *None* (default) | scalars (left, right, bottom, top) ]
Data limits for the axes. The default assigns zero-based row,
column indices to the x, y centers of the pixels.
*interpolation*: str
Acceptable values are 'none', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos'
*colormap*: str
The name of a matplotlib colormap for mapping the data value to the
displayed color at each point.
*colormap* is ignored when *data* has RGB(A) information
*c_limits*: [ 'auto' (default) | scalars (vmin, vmax) ]
Data limits for the colormap.
"""
# create a new dictionary of options for plotting
plot_kwargs = dict()
if 'alpha' in kwargs:
value = kwargs.pop('alpha')
plot_kwargs['alpha'] = value
if 'extent' in kwargs:
value = kwargs.pop('extent')
plot_kwargs['extent'] = value
if 'interpolation' in kwargs:
value = kwargs.pop('interpolation')
plot_kwargs['interpolation'] = value
if 'colormap' in kwargs:
value = kwargs.pop('colormap')
plot_kwargs['cmap'] = value
if 'c_limits' in kwargs:
value = kwargs.pop('c_limits')
if value != 'auto':
plot_kwargs['clim'] = value
# check for an old plot item of the same name
if key in self._items:
item = self._items.pop(key)
item['mpl_item'].remove()
# create the plot item
x_len, y_len = X.shape
zs = np.zeros((x_len-1, y_len-1))
if ('animated' in kwargs) and kwargs.pop('animated'):
self._animated = True
item = self._axes.pcolor(X, Y, zs, animated=True, label=key,
**plot_kwargs)
if len(self._animated_artists) == 0:
# this is the first animated artist, so we need to set up
self._animated_background = self._canvas.copy_from_bbox(self._axes.bbox)
self._animated_artists.append(key)
else:
item = self._axes.pcolor(X, Y, zs, label=key,
**plot_kwargs)
#self._axes.pcolormesh(X, Y, zs)
#self._axes.pcolor(X, Y, zs)
#self._axes.pcolorfast(X, Y, zs)
self._items[key] = dict(item_type='colormesh', mpl_item=item)
if len(kwargs) != 0:
logger = multiprocessing.get_logger()
logger.warning("Unused arguments in 'new_colormesh': %s." % str(kwargs))
def delete(self, key, redraw=True, rescale='auto'):
"""Delete a plot item.
Arguments:
*key*: str
The name you gave to the plot item when it was created.
Optional keyword arguments:
*redraw*: [ *True* (default) | *False* ]
Whether to redraw the plot after applying changes.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
item = self._items.pop(key)
item['mpl_item'].remove()
if redraw:
self._update(rescale)
def clear_data(self, key, redraw=True, rescale='auto'):
"""Delete all the data of a plot item.
Arguments:
*key*: str
The name you gave to the plot item when it was created.
Optional keyword arguments:
*redraw*: [ *True* (default) | *False* ]
Whether to redraw the plot after applying changes.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
item_value = self._items[key]
if item_value['item_type'] == 'curve':
item = item_value['mpl_item']
memory = item_value['memory']
old_data = item_value['data']
if memory == 'circular' or memory == 'growable':
old_data.clear()
item.set_data(np.array([]), np.array([]))
else:
item.set_data(np.array([]), np.array([]))
if redraw:
self._update(rescale)
def set_data(self, key, data, redraw=True, rescale='auto'):
"""Change the data of a plot item.
Arguments:
*key*: str
The name you gave to the plot item when it was created.
*data*: two-dimensional numpy array, list, or tuple or a PIL image
The new data to be assigned to the plot item.
For curves, *data* should be a series of points
of the form ((x1, y1), (x2, y2), ...).
For images, *data* should be a two dimensional float array, a uint8
array or a PIL image. If *data* is an array, *data* can have the
following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalized.
For colormeshes, *data* is a 2-D array, and the dimensions of *X*
and *Y* should be one greater than those of *data*.
Optional keyword arguments:
*redraw*: [ *True* (default) | *False* ]
Whether to redraw the plot after applying changes.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
item_value = self._items[key]
if item_value['item_type'] == 'curve':
# convert the appended object to an array
# if it starts as something else
if type(data) is not np.ndarray:
data = np.array(data)
if data.ndim != 2:
raise ValueError("'data' must be a two-dimensional array.")
item = item_value['mpl_item']
memory = item_value['memory']
old_data = item_value['data']
if memory == 'circular' or memory == 'growable':
old_data.clear()
old_data.append(data)
item.set_data(old_data[:,0], old_data[:,1])
else:
item.set_data(data[:,0], data[:,1])
if redraw:
if self._animated:
if rescale or (key not in self._animated_artists):
force_rescale = True
else:
force_rescale = False
if self._x_autoscale:
axis_min, axis_max = self._axes.get_xlim()
data_min = data[:,0].min()
data_max = data[:,0].max()
if (data_min < axis_min) or (data_max > axis_max) or (data_max-data_min < 0.5*(axis_max-axis_min)):
force_rescale = True
if self._y_autoscale:
axis_min, axis_max = self._axes.get_ylim()
data_min = data[:,1].min()
data_max = data[:,1].max()
if (data_min < axis_min) or (data_max > axis_max) or (data_max-data_min < 0.5*(axis_max-axis_min)):
force_rescale = True
if force_rescale:
# full update
self._full_animated_redraw()
else:
# only need to update and blit the axes region
self._fast_animated_redraw()
else:
if rescale or ((self._x_autoscale or self._y_autoscale) and (rescale == 'auto')):
self._axes.relim()
self._axes.autoscale_view(self._tight_autoscale, self._x_autoscale, self._y_autoscale)
self._figure.tight_layout()
self._canvas.draw()
else:
self._canvas.draw()
elif item_value['item_type'] == 'image':
if data.ndim != 2:
raise ValueError("'data' must be a two-dimensional array.")
item = item_value['mpl_item']
shape = item_value['shape']
item.set_data(data)
if self._animated:
#if rescale or (data.shape[0] != shape) or (data.shape[1] != shape):
# self._axes.relim()
# item.autoscale()
#self._full_animated_redraw()
# PROBLEMS WITH VIEW RANGE IN ABOVE CODE
# BELOW WORKS BUT IS STILL NOT EFFICIENT
# SHOULD REWRITE FOR SPEED
if rescale or (data.shape[0] != shape) or (data.shape[1] != shape):
self._axes.relim()
item.autoscale()
self._figure.tight_layout()
self._canvas.draw()
# update animation background artists
self._animated_background = self._canvas.copy_from_bbox(self._axes.bbox)
for k in self._animated_artists:
self._axes.draw_artist(self._items[k]['mpl_item'])
# blit entire canvas to ensure complete update
self._canvas.blit(self._figure.bbox)
else:
self._axes.relim()
item.autoscale()
self._figure.tight_layout()
self._canvas.draw()
elif item_value['item_type'] == 'colormesh':
item = item_value['mpl_item']
item.set_array(data.ravel())
item.autoscale()
if self._animated:
if rescale:
self._axes.relim()
item.autoscale()
self._full_animated_redraw()
else:
self._axes.relim()
item.autoscale()
self._figure.tight_layout()
self._canvas.draw()
def append_data(self, key, data, redraw=True, rescale='auto'):
"""Append data to a plot item.
Only works with curves which were created with
*memory* = 'circular' or *memory* = 'growable'.
Arguments:
*key*: str
The name you gave to the plot item when it was created.
*data*: one or two-dimensional numpy array, list, or tuple
The new data to be appended to the previous data of the plot item.
*data* should be a single point of the form (x, y) or a series of
points of the form ((x1, y1), (x2, y2), ...).
Keyword arguments:
*redraw*: [ *True* (default) | *False* ]
Whether to redraw the plot after applying changes.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
item_value = self._items[key]
if item_value['item_type'] == 'curve':
# convert the appended object to an array if it starts as something else
if type(data) is not np.ndarray:
data = np.array(data)
if data.ndim == 1:
data = np.array([data])
item = item_value['mpl_item']
memory = item_value['memory']
old_data = item_value['data']
old_data_length = len(old_data)
if memory == 'circular' or memory == 'growable':
old_data.append(data)
item.set_data(old_data[:,0], old_data[:,1])
else:
raise ValueError("Cannot append to curve item with memory == '%s'." % memory)
if redraw:
if self._animated:
if rescale == True or (key not in self._animated_artists):
force_rescale = True
elif rescale == False:
force_rescale = False
else:
force_rescale = False
if self._x_autoscale:
if old_data_length < 2:
# there may not have been enough to set the scale before
force_rescale = True
else:
axis_min, axis_max = self._axes.get_xlim()
data_min = data[:,0].min()
data_max = data[:,0].max()
if (data_min < axis_min) or (data_max > axis_max):
force_rescale = True
if self._y_autoscale:
if old_data_length < 2:
# there may not have been enough to set the scale before
force_rescale = True
else:
axis_min, axis_max = self._axes.get_ylim()
data_min = data[:,1].min()
data_max = data[:,1].max()
if (data_min < axis_min) or (data_max > axis_max):
force_rescale = True
if force_rescale:
# full update
self._full_animated_redraw()
else:
# only need to update and blit the axes region
self._fast_animated_redraw()
else:
if self._x_autoscale or self._y_autoscale:
self._axes.relim()
self._axes.autoscale_view(self._tight_autoscale,
self._x_autoscale, self._y_autoscale)
self._figure.tight_layout()
self._canvas.draw()
else:
self._canvas.draw()
else:
raise ValueError("Cannot append to plot item type '%s'." % item_value['item_type'])
def set_properties(self, key, redraw=True, rescale='auto', **kwargs):
"""Set the graphical properties of a plot item.
Arguments:
*key*: str
The name you gave to the plot item when it was created.
Optional keyword arguments:
Any of the the keyword arguments describing the graphical representation
of the plot item that can be given when the item is created.
*redraw*: [ *True* (default) | *False* ]
Whether to redraw the plot after applying changes.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
item_value = self._items[key]
if item_value['item_type'] == 'curve':
item = item_value['mpl_item']
if 'alpha' in kwargs:
value = kwargs.pop('alpha')
item.set_alpha(value)
if 'line_color' in kwargs:
value = kwargs.pop('line_color')
item.set_color(value)
if 'line_style' in kwargs:
value = kwargs.pop('line_style')
item.set_linestyle(value)
if 'line_width' in kwargs:
value = kwargs.pop('line_width')
item.set_linewidth(value)
if 'marker_color' in kwargs:
value = kwargs.pop('marker_color')
item.set_markerfacecolor(value)
if 'marker_edge_color' in kwargs:
value = kwargs.pop('marker_edge_color')
item.set_markeredgecolor(value)
if 'marker_edge_width' in kwargs:
value = kwargs.pop('marker_edge_width')
item.set_markeredgewidth(value)
if 'marker_style' in kwargs:
value = kwargs.pop('marker_style')
item.set_marker(value)
if 'marker_width' in kwargs:
value = kwargs.pop('marker_width')
item.set_markersize(value)
elif item_value['item_type'] == 'image':
item = item_value['mpl_item']
if 'c_limits' in kwargs:
value = kwargs.pop('c_limits')
item.set_clim(value[0], value[1])
elif item_value['item_type'] == 'colormesh':
item = item_value['mpl_item']
if 'c_limits' in kwargs:
value = kwargs.pop('c_limits')
item.set_clim(value[0], value[1])
if len(kwargs) != 0:
logger = multiprocessing.get_logger()
logger.warning("Unused arguments in 'set_properties': %s."
% str(kwargs))
if redraw:
self._update(rescale)
def set_plot_properties(self, redraw=True, rescale='auto', **kwargs):
"""Set the graphical properties of a plot.
Optional keyword arguments:
*aspect_ratio*: ['auto' (default) | 'equal' | a number ]
*x_limits*: ['auto' (default) | scalars (x_min, x_max)]
*y_limits*: ['auto' (default) | scalars (y_min, y_max)]
*tight_autoscale*: [*True* | *False* (default)]
*x_scale*: ['linear' (default) | 'log']
Scaling of the x-axis.
*y_scale*: ['linear' (default) | 'log']
Scaling of the y-axis.
*title*: str
Title to be drawn above the plot.
*x_label*: str
Label to be drawn on the x-axis of the plot.
*y_label*: str
Label to be drawn on the y-axis of the plot.
*dpi*: int
Resolution (dots per inch) of plots saved to files.
*redraw*: [ *True* (default) | *False* ]
Whether to redraw the plot after applying changes.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
self._plot_properties.update(kwargs)
if 'background' in kwargs:
value = kwargs.pop('background')
if 'aspect_ratio' in kwargs:
# 'auto', 'equal', or a number
value = kwargs.pop('aspect_ratio')
self._axes.set_aspect(value)
if 'x_limits' in kwargs:
value = kwargs.pop('x_limits')
if value == 'auto':
self._axes.set_xlim(auto=True)
self._x_autoscale = True
else:
self._axes.set_xlim(value, auto=False)
self._x_autoscale = False
if 'y_limits' in kwargs:
value = kwargs.pop('y_limits')
if value == 'auto':
self._axes.set_ylim(auto=True)
self._y_autoscale = True
else:
self._axes.set_ylim(value, auto=False)
self._y_autoscale = False
if 'tight_autoscale' in kwargs:
value = kwargs.pop('tight_autoscale')
self._tight_autoscale = value
if 'x_scale' in kwargs:
value = kwargs.pop('x_scale')
self._axes.set_xscale(value)
if 'y_scale' in kwargs:
value = kwargs.pop('y_scale')
self._axes.set_yscale(value)
if 'title' in kwargs:
value = kwargs.pop('title')
self._axes.set_title(value)
if 'x_label' in kwargs:
value = kwargs.pop('x_label')
self._axes.set_xlabel(value)
if 'y_label' in kwargs:
value = kwargs.pop('y_label')
self._axes.set_ylabel(value)
if 'x_grid' in kwargs:
value = kwargs.pop('x_grid')
if 'y_grid' in kwargs:
value = kwargs.pop('y_grid')
if 'dpi' in kwargs:
self._plot_properties['dpi'] = kwargs.pop('dpi')
if len(kwargs) != 0:
logger = multiprocessing.get_logger()
logger.warning("Unused arguments in 'set_plot_properties': %s."
% str(kwargs))
if redraw:
self._update(rescale)
def save_figure(self, filename=None, rescale='auto'):
"""Save an image of the plot to a file.
Optional keyword arguments:
*filename*: str
The name of the file to save to. If no filename is given, a dialog
box in which to choose a filname will be presented.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
if filename is None:
filetypes = self._canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self._canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selected_filter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selected_filter = filter
filters.append(filter)
filters = ';;'.join(filters)
filename = QtGui.QFileDialog.getSaveFileName(None, 'Save Plot Image',
start, filters,
selected_filter)
if filename:
try:
if self._animated:
for k in self._animated_artists:
item = self._items[k]['mpl_item']
item.set_animated(False)
self._animated = False
self._update(rescale)
self._canvas.print_figure(unicode(filename),
bbox_inches='tight',
dpi=self._plot_properties['dpi'])
for k in self._animated_artists:
item = self._items[k]['mpl_item']
item.set_animated(True)
self._animated = True
else:
self._canvas.print_figure(unicode(filename),
bbox_inches='tight',
dpi=self._plot_properties['dpi'])
except Exception, e:
QtGui.QMessageBox.critical(
None, "Error saving file", str(e),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
#
# Chart2D: scrolled plot panel with multiple line plots
#
class Chart2D(pythics.libcontrol.MPLControl):
"""Make a strip chart like plot of data with multiple y values for a given
x value. Additional data can be added efficiently and the user can scroll
through the chart history with a built-in scrollbar. Right click on the
plot to save an image of the plot to a file.
HTML parameters:
*plots*: int (default 1)
The number of plots to draw, stacked vertically with a common x axis.
Note that each plot may have multiple curves, as set with the
*curves_per_plot* property.
*memory*: [ 'circular' (default) | 'growable' ]
Specifies how data will be stored, and what happens when the
originally allocated memory is full. See the *length* parameter for more
information.
*length*: int (default 1000)
The maximum number of points for the plot to store. Additional points
will force earlier points to scroll out of range (if *memory* = 'circular')
or grow the memory (if *memory* = 'growable').
*fast_scroll*: [ *True* | *False* (default) ]
Whether to accelerate scrolling by not drawing axes while scrolling.
*actions*: dict
a dictionary of key:value pairs where the key is the name of a signal
and the value is the function to run when the signal is emitted
actions in this control:
======================= ============================================
signal when emitted
======================= ============================================
'button_press_event' a mouse button is pressed
'button_release_event' a mouse button is released
'draw_event' canvas is redrawn
'key_press_event' a key is pressed
'key_release_event' a key is released
'motion_notify_event' the mouse is moved
'pick_event' an object in the canvas is selected
'resize_event' the figure canvas is resized
'scroll_event' the mouse scroll wheel is rolled
'figure_enter_event' the mouse enters a new figure
'figure_leave_event' the mouse leaves a figure
'axes_enter_event' the mouse enters a new axes
'axes_leave_event'       the mouse leaves an axes
======================= ============================================
"""
def __init__(self, parent, plots=1, memory='circular', length=1000,
fast_scroll=False, **kwargs):
pythics.libcontrol.MPLControl.__init__(self, parent, **kwargs)
# initialize parameters that only depend on the number of plots
self._n_plots = plots
self._n_plot_range = range(self._n_plots)
self._memory = memory
self._history_length = length
self._fast_scroll = fast_scroll
self._requested_span = self._history_length
self._span = self._requested_span
self._span_choice = 'autoscale span'
# setup the layout and plot widget
self._widget = QtGui.QFrame()
self._sizer = QtGui.QVBoxLayout()
self._figure = matplotlib.figure.Figure()
self._canvas = PythicsMPLCanvas(self, self._figure)
self._sizer.addWidget(self._canvas)
self._mpl_widget = self._canvas
# row of controls below the plot
self._row_sizer = QtGui.QHBoxLayout()
self._sizer.addLayout(self._row_sizer)
# set up scoll bar
self._scroll_to_end = True
self._pressed = False
self._scrollbar = QtGui.QScrollBar(QtCore.Qt.Horizontal)
self._scrollbar.setTracking(True)
self._row_sizer.addWidget(self._scrollbar)
self._scrollbar.valueChanged.connect(self._scroll)
self._scrollbar.sliderPressed.connect(self._pressed_start)
self._scrollbar.sliderReleased.connect(self._pressed_end)
# choice of autoscale or fixed span
self._choice_widget = QtGui.QComboBox()
self._choice_widget.insertItem(2, 'autoscale span')
self._choice_widget.insertItem(2, 'fixed span')
self._choice_widget.setFixedWidth(150)
self._choice_widget.activated.connect(self._change_span_choice)
self._row_sizer.addWidget(self._choice_widget)
# box to hold fixed span
self._span_widget = QtGui.QSpinBox()
self._span_widget.setSingleStep(1)
self._span_widget.setMinimum(2)
self._span_widget.setMaximum(self._history_length)
self._span_widget.setFixedWidth(150)
self._span_widget.setValue(self._history_length)
self._span_widget.valueChanged.connect(self._change_span)
self._row_sizer.addWidget(self._span_widget)
self._widget.setLayout(self._sizer)
# initialize parameters that depend on the number of lines
self._n_curves_per_plot = list([1])*self._n_plots
# set up plots
self._plot_axes = list()
self._y_autoscales = list()
self._plot_properties = list()
default_plot_properties = dict(x_limits='auto',
y_limits='auto',
x_scale='linear',
y_scale='linear',
aspect_ratio='auto',
dpi=150)
for i in self._n_plot_range:
if i == 0:
self._plot_axes.append(self._figure.add_subplot(self._n_plots, 1, i+1))
else:
self._plot_axes.append(self._figure.add_subplot(self._n_plots, 1, i+1, sharex=self._plot_axes[0]))
self._plot_properties.append(default_plot_properties.copy())
self._y_autoscales.append(True)
self._set_plot_properties(i, **default_plot_properties)
self._fast_requested = False
self._fast = False
self.clear()
# should have resize event handler to redraw correctly,
# but it doesn't seem to work - instead we use custom plot canvas
#self._canvas.mpl_connect('resize_event', self.on_resize)
def _change_span(self, *args):
self._requested_span = long(self._span_widget.value())
if self._span_choice == 'fixed span':
self._set_span(self._requested_span)
def _change_span_choice(self, *args):
self._span_choice = self._choice_widget.currentText()
if self._span_choice == 'fixed span':
self._set_span(self._requested_span)
else:
self._set_span(self._history_length)
def _set_span(self, span):
self._span = span
length = len(self._data)
self._scroll_page_size = min(span, length)
self._scroll_position = min(self._scroll_position,
length - self._scroll_page_size)
self._update_scrollbar()
self._update_plot()
def _resize(self, event):
if self._fast:
self._canvas.draw()
self._animated_background = self._canvas.copy_from_bbox(self._figure.bbox)
k = 0
for i in range(self._n_plots):
for j in range(self._n_curves_per_plot[i]):
self._plot_axes[i].draw_artist(self.curves[k])
k += 1
# blit entire canvas to ensure complete update
self._canvas.blit(self._figure.bbox)
else:
self._full_redraw()
def _redraw(self):
# blit entire figure after tab change to eliminate drawing artifacts
self._canvas.blit(self._figure.bbox)
def _scroll(self):
self._scroll_position = self._scrollbar.value()
self._update_plot()
def _pressed_start(self):
self._pressed = True
if self._fast_scroll:
self._start_fast()
self._full_redraw(False)
def _pressed_end(self):
self._pressed = False
if self._fast_scroll:
self._stop_fast()
def _go_to_end(self):
return self._scroll_to_end and (not self._pressed)
def _update_scrollbar(self):
self._scrollbar.setMaximum(len(self._data)-self._scroll_page_size)
self._scrollbar.setPageStep(self._scroll_page_size)
self._scrollbar.setValue(self._scroll_position)
def _update_plot(self, layout=True):
start = self._scroll_position
stop = self._scroll_position + self._scroll_page_size
# update data
data_x_ys = self._data[start:stop]
# all share the same x values
data_x = data_x_ys[:,0]
for i in range(self.n_curves_total):
data_y = data_x_ys[:,i+1]
self.curves[i].set_data(data_x, data_y)
# replot
for i in range(self._n_plots):
axes = self._plot_axes[i]
axes.relim()
axes.autoscale_view(True, True, self._y_autoscales[i])
if self._fast:
self._fast_redraw()
else:
self._full_redraw(layout)
def _start_fast(self):
if not self._fast:
for i in range(self._n_plots):
self._plot_axes[i].get_xaxis().set_visible(False)
self._plot_axes[i].get_yaxis().set_visible(False)
self._canvas.draw()
self._animated_background = self._canvas.copy_from_bbox(self._figure.bbox)
self._fast = True
def _fast_redraw(self):
self._canvas.restore_region(self._animated_background)
k = 0
for i in range(self._n_plots):
for j in range(self._n_curves_per_plot[i]):
self._plot_axes[i].draw_artist(self.curves[k])
k += 1
# redraw the region in each axes
self._canvas.blit(self._plot_axes[i].bbox)
def _stop_fast(self):
if (not self._fast_scroll or not self._pressed) and (not self._fast_requested):
self._fast = False
for i in range(self._n_plots):
self._plot_axes[i].get_xaxis().set_visible(True)
self._plot_axes[i].get_yaxis().set_visible(True)
self._full_redraw()
def _full_redraw(self, layout=True):
if layout:
self._figure.tight_layout()
self._canvas.draw()
k = 0
for i in range(self._n_plots):
for j in range(self._n_curves_per_plot[i]):
self._plot_axes[i].draw_artist(self.curves[k])
k += 1
# blit entire canvas to ensure complete update
self._canvas.blit(self._figure.bbox)
def _set_plot_properties(self, n, **kwargs):
axes = self._plot_axes[n]
if 'background' in kwargs:
value = kwargs.pop('background')
if 'aspect_ratio' in kwargs:
# 'auto', 'equal', or a number
value = kwargs.pop('aspect_ratio')
axes.set_aspect(value)
if 'x_limits' in kwargs:
value = kwargs.pop('x_limits')
if value == 'auto':
axes.set_xlim(auto=True)
self._x_autoscale = True
else:
axes.set_xlim(value, auto=False)
self._x_autoscale = False
if 'y_limits' in kwargs:
value = kwargs.pop('y_limits')
if value == 'auto':
axes.set_ylim(auto=True)
self._y_autoscales[n] = True
else:
axes.set_ylim(value, auto=False)
self._y_autoscales[n] = False
if 'x_scale' in kwargs:
value = kwargs.pop('x_scale')
axes.set_xscale(value)
if 'y_scale' in kwargs:
value = kwargs.pop('y_scale')
axes.set_yscale(value)
if 'title' in kwargs:
value = kwargs.pop('title')
axes.set_title(value)
if 'x_label' in kwargs:
value = kwargs.pop('x_label')
axes.set_xlabel(value)
if 'y_label' in kwargs:
value = kwargs.pop('y_label')
axes.set_ylabel(value)
if 'x_grid' in kwargs:
value = kwargs.pop('x_grid')
if 'y_grid' in kwargs:
value = kwargs.pop('y_grid')
if 'dpi' in kwargs:
self.dpi = kwargs.pop('dpi')
if len(kwargs) != 0:
logger = multiprocessing.get_logger()
logger.warning("Unused arguments in 'set_plot_properties': %s."
% str(kwargs))
#---------------------------------------------------
# methods below used only for access by action proxy
def append_data(self, data):
"""Append data to the plot.
Arguments:
*data*: one or two-dimensional numpy array, list, or tuple
The new data to be appended to the previous data of the plot item.
*data* should be a single point of the form [x, y_1, y_2, ...] or a
series of points of the form:
[[x_0, y_01, y_02, ...], [x_1, y_11, y_12, ...], ...].
"""
self._data.append(data)
length = len(self._data)
self._scroll_page_size = min(self._span, length)
if self._go_to_end():
self._scroll_position = length - self._scroll_page_size
self._update_scrollbar()
self._update_plot(layout=False)
def clear(self):
"""Clear all data and labels from the plot.
"""
# initialize all curve properties when the number of curves is changed
# first clear out old curves
for i in self._n_plot_range:
self._plot_axes[i].clear()
self.n_curves_total = sum(self._n_curves_per_plot)
# list of curves (points or lines)
self.curves = list()
for i in range(self._n_plots):
for j in range(self._n_curves_per_plot[i]):
item, = self._plot_axes[i].plot(np.array([]), np.array([]),
animated=True)
self.curves.append(item)
for i in self._n_plot_range[0:-1]:
for label in self._plot_axes[i].get_xticklabels():
label.set_visible(False)
# clear out the data
if self._memory == 'growable':
self._data = pythics.lib.GrowableArray(cols=self.n_curves_total+1,
length=self._history_length)
else:
self._data = pythics.lib.CircularArray(cols=self.n_curves_total+1,
length=self._history_length)
self._scroll_page_size = 0
self._scroll_position = 0
self._update_scrollbar()
self._update_plot()
def clear_data(self):
"""Clear all data from the plot.
"""
self._data.clear()
self._scroll_page_size = 0
self._scroll_position = 0
self._update_scrollbar()
self._update_plot()
def update(self):
"""Force the plot to update. This should not normally be necessary.
"""
self._update_plot()
def set_data(self, data):
"""Set the data displayed on the plot, replacing the old data.
Arguments:
*data*: one or two-dimensional numpy array, list, or tuple
The new data to be appended to the previous data of the plot item.
*data* should be a single point of the form [x, y_1, y_2, ...] or a
series of points of the form:
[[x_0, y_01, y_02, ...], [x_1, y_11, y_12, ...], ...].
"""
self._data.clear()
self._data.append(data)
length = len(self._data)
self._scroll_page_size = min(self._span, length)
if self._go_to_end():
self._scroll_position = length - self._scroll_page_size
self._update_scrollbar()
self._update_plot(layout=False)
def _get_scroll_to_end(self):
"""Whether to scroll to the right as new data is added to the plot.
[ *True* (default) | *False* ]
"""
return self._scroll_to_end
def _set_scroll_to_end(self, value):
self._scroll_to_end = value
if value == True:
self._scroll_position = len(self._data) - self._scroll_page_size
self._update_scrollbar()
self._update_plot()
scroll_to_end = property(_get_scroll_to_end, _set_scroll_to_end)
def _get_fast(self):
"""Whether to skip drawing the axes to accelerate drawing. Set to
*False* when done with updates to force a full redraw.
        [ *True* | *False* (default) ]
"""
return self._fast_requested
def _set_fast(self, value):
if value is not self._fast_requested:
self._fast_requested = value
# change requested
if value:
self._start_fast()
else:
self._stop_fast()
fast = property(_get_fast, _set_fast)
def _get_curves_per_plot(self):
"""A list integers specifying how many curves are to be drawn in each
plot. [ 1, 1, 2 ] would specify 1 curve in the first plot, 1 in the
second, and two in the third. These curves would be numbered 0 through
3 for access in other methods.
"""
return self._n_curves_per_plot
def _set_curves_per_plot(self, value):
self._n_curves_per_plot = value
self.clear()
curves_per_plot = property(_get_curves_per_plot, _set_curves_per_plot)
def set_plot_properties(self, n, **kwargs):
"""Set the graphical properties of a plot.
Arguments:
*n*: int
The index of the plot of which to set the properties.
Optional keyword arguments:
*y_limits*: [ 'auto' (default) | scalars (y_min, y_max) ]
*x_scale*: [ 'linear' (default) | 'log' ]
Scaling of the x-axis.
*y_scale*: [ 'linear' (default) | 'log' ]
Scaling of the y-axis.
*title*: str
Title to be drawn above the plot.
*x_label*: str
Label to be drawn on the x-axis of the plot.
*y_label*: str
Label to be drawn on the y-axis of the plot.
*dpi*: int
Resolution (dots per inch) of plots saved to files.
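        A hypothetical call (keyword names as documented above):
            chart.set_plot_properties(0, title='Detector signal',
                                      y_scale='log', y_limits=(1e-3, 1.0))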
"""
self._set_plot_properties(n, **kwargs)
self._update_plot()
def set_curve_properties(self, n, **kwargs):
"""Set the graphical properties of a curve item.
Arguments:
*n*: int
            The index of the curve of which to set the properties.
Optional keyword arguments:
            Any of the keyword arguments that can be given to specify the
properties of a curve in Plot2D (listed under *new_curve*).
"""
item = self.curves[n]
if 'alpha' in kwargs:
value = kwargs.pop('alpha')
item.set_alpha(value)
if 'line_color' in kwargs:
value = kwargs.pop('line_color')
item.set_color(value)
if 'line_style' in kwargs:
value = kwargs.pop('line_style')
item.set_linestyle(value)
if 'line_width' in kwargs:
value = kwargs.pop('line_width')
item.set_linewidth(value)
if 'marker_color' in kwargs:
value = kwargs.pop('marker_color')
item.set_markerfacecolor(value)
if 'marker_edge_color' in kwargs:
value = kwargs.pop('marker_edge_color')
item.set_markeredgecolor(value)
if 'marker_edge_width' in kwargs:
value = kwargs.pop('marker_edge_width')
item.set_markeredgewidth(value)
if 'marker_style' in kwargs:
value = kwargs.pop('marker_style')
item.set_marker(value)
if 'marker_width' in kwargs:
value = kwargs.pop('marker_width')
item.set_markersize(value)
if len(kwargs) != 0:
logger = multiprocessing.get_logger()
logger.warning("Unused arguments in 'set_properties': %s."
% str(kwargs))
self._update_plot()
def save_figure(self, filename=None):
"""Save an image of the plot to a file.
Optional keyword arguments:
*filename*: str
The name of the file to save to. If no filename is given, a dialog
            box in which to choose a filename will be presented.
*rescale*: [ 'auto' (default) | *True* | *False* ]
Whether to rescale the plot. If 'auto', then only rescale if needed.
"""
if filename is None:
filetypes = self._canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self._canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selected_filter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selected_filter = filter
filters.append(filter)
filters = ';;'.join(filters)
filename = QtGui.QFileDialog.getSaveFileName(None, 'Save Plot Image',
start, filters,
selected_filter)
if filename:
try:
for item in self.curves:
item.set_animated(False)
self._animated = False
self._canvas.draw()
self._canvas.print_figure(unicode(filename),
bbox_inches='tight',
dpi=self.dpi)
for item in self.curves:
item.set_animated(True)
self._animated = True
except Exception, e:
QtGui.QMessageBox.critical(
None, "Error saving file", str(e),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
class PythicsMPLCanvas(FigureCanvas):
def __init__(self, pythics_control, *args, **kwargs):
self._pythics_control = pythics_control
FigureCanvas.__init__(self, *args, **kwargs)
# setup context menu on right-click
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self._popup)
def _popup(self, pos):
menu = QtGui.QMenu()
save_action = menu.addAction('Save...')
action = menu.exec_(self.mapToGlobal(pos))
if action == save_action:
self._pythics_control.save_figure()
def resizeEvent(self, event):
FigureCanvas.resizeEvent(self, event)
self._pythics_control._resize(event)
| gpl-3.0 |
ElDeveloper/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
gsnyder206/mock-surveys | original_illustris/create_filter_package.py | 1 | 15011 | import numpy as np
import astropy
import astropy.cosmology
import astropy.io.fits as pyfits
import astropy.units as u
import astropy.io.ascii as ascii
import os
import pandas
def hdu_from_existing_filter(input_file):
return hdu
def wfirst_filters():
xls='WFIRST_WIMWSM_throughput_data_190531.xlsm'
throughputs=pandas.read_excel(xls,'EffectiveArea')
wl_microns=throughputs['Unnamed: 0'][19:].values
r062_A=throughputs['Unnamed: 1'][19:].values
z087_A=throughputs['Unnamed: 2'][19:].values
y106_A=throughputs['Unnamed: 3'][19:].values
j129_A=throughputs['Unnamed: 4'][19:].values
w146_A=throughputs['Unnamed: 5'][19:].values
h158_A=throughputs['Unnamed: 6'][19:].values
f184_A=throughputs['Unnamed: 7'][19:].values
maxA = np.max(throughputs[['Unnamed: 2','Unnamed: 3','Unnamed: 4',
'Unnamed: 5','Unnamed: 6','Unnamed: 7','Unnamed: 1']][19:].values)
r_tr = r062_A/maxA
z_tr = z087_A/maxA
y_tr = y106_A/maxA
j_tr = j129_A/maxA
w_tr = w146_A/maxA
h_tr = h158_A/maxA
f_tr = f184_A/maxA
rhdu = wf_hdu(r_tr[r_tr > 1.0e-4],wl_microns[r_tr > 1.0e-4])
rhdu.header['EXTNAME']='wfirst/wfi_r062'
zhdu = wf_hdu(z_tr[z_tr > 1.0e-4],wl_microns[z_tr > 1.0e-4])
zhdu.header['EXTNAME']='wfirst/wfi_z087'
yhdu = wf_hdu(y_tr[y_tr > 1.0e-4],wl_microns[y_tr > 1.0e-4])
yhdu.header['EXTNAME']='wfirst/wfi_y106'
jhdu = wf_hdu(j_tr[j_tr > 1.0e-4],wl_microns[j_tr > 1.0e-4])
jhdu.header['EXTNAME']='wfirst/wfi_j129'
whdu = wf_hdu(w_tr[w_tr > 1.0e-4],wl_microns[w_tr > 1.0e-4])
whdu.header['EXTNAME']='wfirst/wfi_w146'
hhdu = wf_hdu(h_tr[h_tr > 1.0e-4],wl_microns[h_tr > 1.0e-4])
hhdu.header['EXTNAME']='wfirst/wfi_h158'
fhdu = wf_hdu(f_tr[f_tr > 1.0e-4],wl_microns[f_tr > 1.0e-4])
fhdu.header['EXTNAME']='wfirst/wfi_f184'
'''
Z_tr = Z_Aeff_m2/np.max(Z_Aeff_m2)
Y_tr = Y_Aeff_m2/np.max(Y_Aeff_m2)
J_tr = J_Aeff_m2/np.max(J_Aeff_m2)
W_tr = W_Aeff_m2/np.max(W_Aeff_m2)
H_tr = H_Aeff_m2/np.max(H_Aeff_m2)
F_tr = F_Aeff_m2/np.max(F_Aeff_m2)
zhdu = wf_hdu(Z_tr,Z_wl_um)
zhdu.header['EXTNAME']='WFI_DRM15_Z087'
yhdu = wf_hdu(Y_tr,Y_wl_um)
yhdu.header['EXTNAME']='WFI_DRM15_Y106'
jhdu = wf_hdu(J_tr,J_wl_um)
jhdu.header['EXTNAME']='WFI_DRM15_J129'
whdu = wf_hdu(W_tr,W_wl_um)
whdu.header['EXTNAME']='WFI_DRM15_W149'
hhdu = wf_hdu(H_tr,H_wl_um)
hhdu.header['EXTNAME']='WFI_DRM15_H158'
fhdu = wf_hdu(F_tr,F_wl_um)
fhdu.header['EXTNAME']='WFI_DRM15_F184'
'''
return (rhdu,zhdu,yhdu,jhdu,whdu,hhdu,fhdu)
def wfirst_filters_drm15():
Z_wl_um = np.asarray([0.75,0.7559,0.7617,0.7676,0.7734,0.7793,0.7851,0.7910,0.7968,0.8027,0.8085,0.8144,0.8202,0.8261,0.8320,0.8378,0.8437,0.8495,0.8554,0.8612,0.8671,0.8729,0.8788,0.8846,0.8905,0.8963,0.9022,0.9080,0.9139,0.9198,0.9256,0.9315,0.9373,0.9432,0.9490,0.9549,0.9607,0.9666,0.9724,0.9783,0.9841])
Z_Aeff_m2 = np.asarray([0.069,1.955,2.317,2.292,2.292,2.294,2.279,2.275,2.266,2.271,2.268,2.272,2.266,2.270,2.262,2.258,2.263,2.247,2.251,2.252,2.273,2.270,2.267,2.265,2.264,2.253,2.258,2.255,2.255,2.253,2.253,2.249,2.246,2.235,2.231,2.219,2.214,2.117,0.050,0.000,0.000])
Y_wl_um = np.asarray([0.92,0.9280,0.9361,0.9441,0.9522,0.9602,0.9683,0.9763,0.9844,0.9924,1.0005,1.0085,1.0166,1.0246,1.0327,1.0407,1.0488,1.0568,1.0649,1.0729,1.0810,1.0890,1.0971,1.1051,1.1132,1.1212,1.1293,1.1373,1.1454,1.1534,1.1615,1.1695,1.1776,1.1856,1.1937,1.2017,1.2098,1.2178,1.2259,1.2339,1.2420,1.2500])
Y_Aeff_m2 = np.asarray([0.204,0.743,1.282,1.819,2.139,2.138,2.137,2.136,2.135,2.134,2.133,2.138,2.147,2.153,2.162,2.168,2.177,2.184,2.194,2.205,2.215,2.225,2.233,2.244,2.254,2.264,2.273,2.286,2.294,2.306,2.319,2.328,2.242,1.791,1.338,0.879,0.883,0.418,0.000,0.000,0.000,0.000])
J_wl_um = np.asarray([1.1,1.1098,1.1195,1.1293,1.1390,1.1488,1.1585,1.1683,1.1780,1.1878,1.1976,1.2073,1.2171,1.2266,1.2366,1.2463,1.2561,1.2659,1.2756,1.2854,1.2951,1.3049,1.3146,1.3244,1.3341,1.3439,1.3537,1.3634,1.3732,1.3829,1.3927,1.4024,1.4122,1.4220,1.4317,1.4415,1.4512,1.4610,1.4707,1.4805,1.4902,1.5000])
J_Aeff_m2 = np.asarray([0.000,0.000,0.208,0.678,1.154,1.637,2.124,2.328,2.341,2.354,2.368,2.380,2.393,2.406,2.420,2.433,2.446,2.462,2.475,2.487,2.500,2.512,2.524,2.536,2.548,2.559,2.570,2.581,2.592,2.607,2.618,2.626,2.633,2.643,2.430,2.012,1.590,1.164,0.736,0.304,0.000,0.000])
W_wl_um = np.asarray([0.8,0.8293,0.8585,0.8878,0.9171,0.9463,0.9756,1.0049,1.0341,1.0634,1.0927,1.1220,1.1512,1.1805,1.2098,1.2390,1.2683,1.2976,1.3268,1.3561,1.3854,1.4146,1.4439,1.4732,1.5024,1.5317,1.5610,1.5902,1.6195,1.6488,1.6780,1.7073,1.7366,1.7659,1.7951,1.8247,1.8537,1.8829,1.9122,1.9412,1.9707,2.0000])
W_Aeff_m2 = np.asarray([0.000,0.000,0.000,0.000,0.153,1.864,1.871,1.948,1.987,2.052,2.087,1.982,2.147,2.226,2.263,2.221,2.299,2.345,2.346,2.429,2.270,2.358,2.444,2.460,2.510,2.530,2.555,2.579,2.576,2.650,2.603,2.653,2.639,2.683,2.640,2.648,2.641,2.586,2.651,2.635,2.618,0.002])
H_wl_um = np.asarray([1.35,1.3622,1.3744,1.3866,1.3988,1.4110,1.4232,1.4354,1.4476,1.4598,1.4720,1.4841,1.4963,1.5085,1.5207,1.5329,1.5451,1.5573,1.5695,1.5817,1.5939,1.6061,1.6183,1.6305,1.6427,1.6549,1.6671,1.6793,1.6915,1.7037,1.7159,1.7280,1.7402,1.7524,1.7646,1.7768,1.7890,1.8012,1.8134,1.8256,1.8378,1.8500])
H_Aeff_m2 = np.asarray([0.000,0.495,0.937,1.383,1.834,2.630,2.644,2.661,2.674,2.687,2.698,2.707,2.717,2.725,2.735,2.745,2.754,2.762,2.768,2.776,2.784,2.792,2.799,2.806,2.811,2.816,2.823,2.829,2.834,2.840,2.845,2.850,2.779,2.408,2.035,1.660,1.285,0.530,0.152,0.000,0.000,0.000])
F_wl_um = np.asarray([1.65,1.6610,1.6720,1.6829,1.6939,1.7049,1.7159,1.7268,1.7378,1.7488,1.7598,1.7707,1.7817,1.7927,1.8037,1.8146,1.8256,1.8366,1.8476,1.8585,1.8695,1.8805,1.8915,1.9024,1.9134,1.9244,1.9354,1.9463,1.9573,1.9683,1.9793,1.9902,2.0012,2.0122,2.0232,2.0341,2.0451,2.0561,2.0671,2.0780,2.0890,2.1000])
F_Aeff_m2 = np.asarray([0.000,0.521,0.877,1.234,1.592,1.952,2.312,2.580,2.585,2.588,2.592,2.595,2.598,2.601,2.601,2.601,2.603,2.605,2.606,2.609,2.611,2.614,2.615,2.616,2.618,2.621,2.623,2.630,2.635,2.618,2.313,1.697,1.390,1.083,0.775,0.466,0.158,0.000,0.000,0.000,0.000,0.000])
Z_tr = Z_Aeff_m2/np.max(Z_Aeff_m2)
Y_tr = Y_Aeff_m2/np.max(Y_Aeff_m2)
J_tr = J_Aeff_m2/np.max(J_Aeff_m2)
W_tr = W_Aeff_m2/np.max(W_Aeff_m2)
H_tr = H_Aeff_m2/np.max(H_Aeff_m2)
F_tr = F_Aeff_m2/np.max(F_Aeff_m2)
zhdu = wf_hdu(Z_tr,Z_wl_um)
zhdu.header['EXTNAME']='WFI_DRM15_Z087'
yhdu = wf_hdu(Y_tr,Y_wl_um)
yhdu.header['EXTNAME']='WFI_DRM15_Y106'
jhdu = wf_hdu(J_tr,J_wl_um)
jhdu.header['EXTNAME']='WFI_DRM15_J129'
whdu = wf_hdu(W_tr,W_wl_um)
whdu.header['EXTNAME']='WFI_DRM15_W149'
hhdu = wf_hdu(H_tr,H_wl_um)
hhdu.header['EXTNAME']='WFI_DRM15_H158'
fhdu = wf_hdu(F_tr,F_wl_um)
fhdu.header['EXTNAME']='WFI_DRM15_F184'
return (zhdu,yhdu,jhdu,whdu,hhdu,fhdu)
def wf_hdu(tr,um):
print(np.asarray(tr))
print(um)
col2 = pyfits.Column(name='totalrelativeefficiency',format='D',unit='relative fraction',array=np.asarray(tr,dtype=np.float64))
col1 = pyfits.Column(name='wavelength',format='D',unit='angstrom',array=np.asarray(um,dtype=np.float64)*1.0e4)
cols = pyfits.ColDefs([col1,col2])
zhdu = pyfits.BinTableHDU.from_columns(cols)
return zhdu
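# A small illustrative example (hypothetical filter, not one of the mission
# throughputs handled elsewhere in this module): a flat top-hat response
# between 1.0 and 1.1 microns packaged as a binary table HDU.
#   tophat = wf_hdu(np.ones(11), np.linspace(1.0, 1.1, 11))
#   tophat.header['EXTNAME'] = 'example/tophat_105'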
def output_sunrise_filter_directory(fitsfile,dirname):
f = pyfits.open(fitsfile)
if not os.path.lexists(dirname):
os.mkdir(dirname)
all_list = []
grism_list = []
for hdu in f[1:]:
#print hdu.header['EXTNAME']
filter_name = os.path.join(dirname,hdu.header['EXTNAME'])
filter_wl = hdu.data['wavelength']
filter_tr = hdu.data['totalrelativeefficiency']
data = astropy.table.Table([np.round(filter_wl,4),np.round(filter_tr,4)],names=['wl','tr'])
dn = os.path.dirname(filter_name)
if not os.path.lexists(dn):
os.mkdir(dn)
all_list.append(hdu.header['EXTNAME'])
if hdu.header['EXTNAME'][0:5]=='grism':
grism_list.append(hdu.header['EXTNAME'])
ascii.write(data,filter_name,Writer=ascii.NoHeader)
#output_lists
#print all_list
#print grism_list
st_list = ['hst/wfc3_f336w',
'hst/acs_f435w',
'hst/acs_f606w',
'hst/acs_f814w',
'hst/wfc3_f125w',
'hst/wfc3_f160w',
'wfirst/wfi_r062',
'wfirst/wfi_z087',
'wfirst/wfi_y106',
'wfirst/wfi_j129',
'wfirst/wfi_w146',
'wfirst/wfi_h158',
'wfirst/wfi_f184',
'jwst/nircam_f115w',
'jwst/nircam_f150w',
'jwst/nircam_f200w',
'jwst/nircam_f277w',
'jwst/nircam_f356w',
'jwst/nircam_f444w',
'jwst/miri_F770W',
'jwst/miri_F1500W']
rest_list = ['standard/wfcam_j',
'standard/wfcam_h',
'standard/wfcam_k',
'standard/bessel_b',
'standard/bessel_i',
'standard/bessel_r',
'standard/bessel_u',
'standard/bessel_v']
other_list = ['galex/fuv_1500',
'galex/nuv_2500',
'sdss/u',
'sdss/g',
'sdss/r',
'sdss/i',
'sdss/z',
'newfirm/newfirm_h1ccd',
'newfirm/newfirm_h2ccd',
'newfirm/newfirm_j1ccd',
'newfirm/newfirm_j2ccd',
'newfirm/newfirm_j3ccd',
'newfirm/newfirm_Kccd',
'lsst/lsst_u',
'lsst/lsst_g',
'lsst/lsst_r',
'lsst/lsst_i',
'lsst/lsst_z',
'lsst/lsst_y3']
all_table = astropy.table.Table([np.asarray(all_list)],names=['filtername'])
grism_table = astropy.table.Table([np.asarray(grism_list)],names=['filtername'])
st_table = astropy.table.Table([np.asarray(st_list)],names=['filtername'])
rest_table = astropy.table.Table([np.asarray(rest_list)],names=['filtername'])
other_table = astropy.table.Table([np.asarray(other_list)],names=['filtername'])
ascii.write(all_table,os.path.join(dirname,'filters_all'),Writer=ascii.NoHeader)
ascii.write(grism_table,os.path.join(dirname,'filters_grism'),Writer=ascii.NoHeader)
ascii.write(st_table,os.path.join(dirname,'filters_st'),Writer=ascii.NoHeader)
ascii.write(rest_table,os.path.join(dirname,'filters_rest'),Writer=ascii.NoHeader)
ascii.write(other_table,os.path.join(dirname,'filters_other'),Writer=ascii.NoHeader)
return
def input_old_filters(flist,folder):
names = np.asarray( ascii.read(flist,Reader=ascii.NoHeader))
hdus = []
#print flist
#print folder
#print names
for n in names:
#print n[0]
path = os.path.join(folder,n[0])
data = ascii.read(path)
wl_ang = np.round(np.asarray(data['col1']),4)
tr = np.round(np.asarray(data['col2']),8)
hdu = wf_hdu(tr,wl_ang/1.0e4)
hdu.header['EXTNAME']=n[0][0:-4]
#print hdu.header['EXTNAME']
hdus.append(hdu)
return hdus
def miri_filters():
flist = ['F560W_throughput.fits','F770W_throughput.fits','F1000W_throughput.fits','F1130W_throughput.fits','F1280W_throughput.fits','F1500W_throughput.fits','F1800W_throughput.fits','F2100W_throughput.fits','F2550W_throughput.fits']
hdus = []
for file in flist:
f = pyfits.open(os.path.join('MIRI/filters',file))
tr = f[1].data['THROUGHPUT']
wl = f[1].data['WAVELENGTH']
hdu = wf_hdu(tr,wl/1.0e4)
hdu.header['EXTNAME']='jwst/miri_'+file[:-16]
hdus.append(hdu)
return hdus
def nircam_filters():
flist = ['F070W_throughput.fits',
'F090W_throughput.fits',
'F115W_throughput.fits',
'F150W_throughput.fits',
'F200W_throughput.fits',
'F277W_throughput.fits',
'F356W_throughput.fits',
'F444W_throughput.fits',
'F410M_throughput.fits']
hdus = []
for file in flist:
f = pyfits.open(os.path.join('NIRCam_Filters',file))
tr = f[1].data['THROUGHPUT']
wl = f[1].data['WAVELENGTH']
hdu = wf_hdu(tr,wl/1.0e4)
hdu.header['EXTNAME']='jwst/nircam_'+file[:-16]
hdus.append(hdu)
return hdus
def make_grism_filters(start,stop,number):
micron_bins = np.logspace(np.log10(start),np.log10(stop),number)
hdus = []
for i,b in enumerate(micron_bins[1:]):
bin_start=micron_bins[i]
bin_stop=b
bin_width = bin_stop-bin_start
bin_center = (bin_start + bin_stop)/2.0
thisname='grism/gen1_'+'{:5.3f}'.format(bin_center)
#print bin_start, bin_stop, bin_width, bin_center, thisname
wl_start = bin_start-bin_width
wl_stop = bin_stop+bin_width
        wl_grid = np.linspace(wl_start,wl_stop,30)
wl_tr = np.where(np.logical_and(wl_grid >= bin_start,wl_grid < bin_stop ),np.ones_like(wl_grid),np.zeros_like(wl_grid) )
this_hdu = wf_hdu(wl_tr,wl_grid)
this_hdu.header['EXTNAME']=thisname
hdus.append(this_hdu)
return hdus
if __name__=="__main__":
#get existing filters and add new ones
#note these filters are for simple BROADBAND test images, not full simulations, so accuracy=== MEH
wfirst_hdus = wfirst_filters()
#print wfirst_hdus[4].data['totalrelativeefficiency'], wfirst_hdus[4].data['wavelength'], wfirst_hdus[4].header.cards
primhdu = pyfits.PrimaryHDU()
newlist = pyfits.HDUList([primhdu])
for hdu in wfirst_hdus:
newlist.append(hdu)
all_filters = input_old_filters(os.path.expandvars('$HOME/Dropbox/Projects/FILTERS/sunrise_data/filters_redshifted'),os.path.expandvars('$HOME/Dropbox/Projects/FILTERS/sunrise_data/filters'))
rest_filters = input_old_filters(os.path.expandvars('$HOME/Dropbox/Projects/FILTERS/sunrise_data/filters_restframe_new'),os.path.expandvars('$HOME/Dropbox/Projects/FILTERS/sunrise_data/filters'))
for hdu in all_filters:
newlist.append(hdu)
for hdu in rest_filters:
newlist.append(hdu)
miri_hdus = miri_filters()
for hdu in miri_hdus:
newlist.append(hdu)
nircam_hdus = nircam_filters()
for hdu in nircam_hdus:
newlist.append(hdu)
gen1_grisms = make_grism_filters(0.69,5.0,250)
#grs_grisms = make_grism_filters(1.35,1.89,250)
for hdu in gen1_grisms:
newlist.append(hdu)
packagef = 'filterpackage.fits'
newlist.writeto(packagef,clobber=True)
output_sunrise_filter_directory(packagef,'sunrise_filters')
| mit |
kernc/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
soylentdeen/cuddly-weasel | gfVerification/compareGfs.py | 1 | 3065 | import matplotlib.pyplot as pyplot
import numpy
import scipy
import sys
import MoogTools
import AstroUtils
fig = pyplot.figure(0)
fig.clear()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
baseName = sys.argv[1]
arcturusConfig = AstroUtils.parse_config(baseName+"_Solar.cfg")
solarConfig = AstroUtils.parse_config(baseName+"_Arcturus.cfg")
originalConfig = arcturusConfig.copy()
arcturusConfig["applyCorrections"] = True
solarConfig["applyCorrections"] = True
arcturus = MoogTools.LineList(None, arcturusConfig)
solar = MoogTools.LineList(None, solarConfig)
original = MoogTools.LineList(None, originalConfig)
orig = []
sol = []
arc = []
diff = []
dsol = []
darc = []
species = []
expot = []
wl = []
for i in range(original.nStrong):
orig.append(original.strongLines[i].loggf)
sol.append(solar.strongLines[i].loggf)
arc.append(arcturus.strongLines[i].loggf)
wl.append(arcturus.strongLines[i].wl)
diff.append(sol[-1] - arc[-1])
dsol.append(sol[-1] - orig[-1])
darc.append(arc[-1] - orig[-1])
species.append(arcturus.strongLines[i].species)
expot.append(arcturus.strongLines[i].expot_lo)
for i in range(original.numLines - original.nStrong):
orig.append(original.weakLines[i].loggf)
sol.append(solar.weakLines[i].loggf)
arc.append(arcturus.weakLines[i].loggf)
wl.append(arcturus.weakLines[i].wl)
diff.append(sol[-1] - arc[-1])
dsol.append(sol[-1] - orig[-1])
darc.append(arc[-1] - orig[-1])
species.append(arcturus.weakLines[i].species)
expot.append(arcturus.weakLines[i].expot_lo)
diff = numpy.array(diff)
orig = numpy.array(orig)
sol = numpy.array(sol)
arc = numpy.array(arc)
dsol = numpy.array(dsol)
darc = numpy.array(darc)
species = numpy.array(species)
expot = numpy.array(expot)
wl = numpy.array(wl)
changed = diff != 0.0
#ax.plot([-6, 0], [-6, 0])
#for w, s, a, sp, e in zip(wl[changed], dsol[changed], darc[changed], species[changed],
# expot[changed]):
# e *= 10.
# ax.plot([w, w], [s, a], color = 'k')
# ax.scatter([w], [s], color = 'b', s=[e,e])
# ax.scatter([w], [a], color = 'r', s=[e,e])
#ax.plot([numpy.min(wl), numpy.max(wl)], [0.0, 0.0])
ax.scatter(expot[changed], dsol[changed], label='Solar', color = 'b')
ax.scatter(expot[changed], darc[changed], label='Arcturus', color = 'r')
#ax.scatter(wl[changed], dsol[changed]/10, color = 'b', label='Solar', s=species[changed])
#ax.scatter(wl[changed], darc[changed]/10, color = 'r', label='Arcturus', s=species[changed])
#for
#ax.plot([-4, 1.0], [-4, 1.0])
ax.legend(loc=3)
#ax.set_xbound(-4.0, 1.0)
#ax.set_ybound(-4.0, 1.0)
ax.set_xlabel("Excitation Potential (eV)")
ax.set_ylabel("Delta log gf")
#ax.scatter(expot[changed], diff[changed])
#ax.scatter(orig[changed], dsol[changed], color = 'r')
#ax.scatter(orig[changed], darc[changed], color = 'b')
#ax.plot([0.0, 8.0], [0.0, 0.0])
#ax.scatter(species[changed]+1, orig[changed], color = 'k')
#ax.scatter(species[changed], arc[changed], color = 'b')
#ax.scatter(species[changed], sol[changed], color = 'r')
fig.show()
fig.savefig("loggf_changes.png")
| mit |
adamLange/moose | examples/ex14_pps/plot.py | 14 | 1194 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import csv
# Python 2.7 does not have str.isnumeric()?
def isInt(string):
try:
int(string)
return True
except ValueError:
return False
# Format of the CSV file is:
# time,dofs,integral
# 1,221,2.3592493758695,
# 2,841,0.30939803328432,
# 3,3281,0.088619511656913,
# 4,12961,0.022979021365857,
# 5,51521,0.0057978748995635,
# 6,205441,0.0014528130907967,
reader = csv.reader(open('out.csv'))
dofs = []
errs = []
for row in reader:
if row and isInt(row[0]): # Skip rows that don't start with numbers.
dofs.append(int(row[1]))
errs.append(float(row[2]))
# Construct data to be plotted
xdata = np.log10(np.sqrt(dofs))
ydata = np.log10(errs)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(xdata, ydata, 'bo-')
ax1.set_xlabel('log (1/h)')
ax1.set_ylabel('log (L2-error)')
# Create linear curve fits of the data, but just the last couple data
# points when we are in the asymptotic regime.
fit = np.polyfit(xdata[2:-1], ydata[2:-1], 1)
fit_msg = 'Slope ~ ' + '%.2f' % fit[0]
ax1.text(2.0, -1.0, fit_msg)
plt.savefig('plot.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
etamponi/emetrics | emetrics/evaluation/manual_experiment.py | 1 | 1778 | from scipy.stats.stats import pearsonr
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from emetrics.coefficients.association_measure import AssociationMeasure
from emetrics.correlation_score import CorrelationScore
from emetrics.evaluation.random_subsets_experiment import RandomSubsetsExperiment
from emetrics.label_encoders.ordinal_label_encoder import OrdinalLabelEncoder
from emetrics.preparers.bootstrap_sampler import BootstrapSampler
from emetrics.preparers.noise_injector import NoiseInjector
__author__ = 'Emanuele Tamponi'
def main():
subset_sizes = range(1, 6)
for subset_size in subset_sizes:
experiment = RandomSubsetsExperiment(
dataset="iris",
subset_size=subset_size,
scorers=[
("wilks", CorrelationScore(
coefficient=AssociationMeasure(
measure="wilks"
),
preparer_pipeline=[
BootstrapSampler(sampling_percent=100),
NoiseInjector(stddev=1e-6),
],
label_encoder=OrdinalLabelEncoder()
))
],
classifiers=[
("dt", DecisionTreeClassifier()),
("rf", RandomForestClassifier())
],
n_folds=10,
n_runs=10
)
results = experiment.run()
if results is None:
continue
for classifier in results["errors"]:
corr, _ = pearsonr(results["scores"]["wilks"], results["errors"][classifier])
print "{} - correlation with {}: {:.3f}".format(subset_size, classifier, corr)
if __name__ == "__main__":
main()
| gpl-2.0 |
ilo10/scikit-learn | examples/svm/plot_rbf_parameters.py | 57 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of steps in ``C_range`` and
``gamma_range`` will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
mrnameless123/advance-image-processing-class | utils.py | 1 | 13917 | import numpy as np
import warnings
from sklearn.preprocessing import normalize
from enum import Enum
warnings.filterwarnings('error')
print('Imported',__name__)
class BroadCastType(Enum):
BC_VERTICAL = 0
BC_HORIZONTAL = 1
def func_compute_rms(param_input):
print(np.linalg.norm(param_input))
# sumvalue = 0
# for x in param_input:
# for y in x:
# sumvalue += y**2
# print(np.sqrt(sumvalue))
def compute_distance(array_a, array_b):
distance = []
for x in array_a:
tmp = []
for y in array_b:
try:
item = x-y
except Exception as argument:
print('Exception Occured:',argument)
return
else:
value = np.power(np.linalg.norm(item),2)
tmp.append(value)
distance.append(tmp)
return np.array(distance)
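# Usage sketch (hypothetical inputs): squared Euclidean distances between the
# rows of two 2-D arrays, one row of `array_a` per output row.
#   a = np.array([[0., 0.], [1., 0.]])
#   b = np.array([[0., 1.]])
#   compute_distance(a, b)   # -> array([[1.], [2.]])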
def index_of_value_in_array(param_array, param_value):
array = np.array(param_array)
if array.ndim == 1:
result1d = np.where(param_array == param_value)
try:
result1d[0]
except Exception as argument:
print('index_of_value_in_array Exception occurred: {0}'.format(argument))
return None
else:
return np.array(result1d)
elif array.ndim == 2:
result2d = np.where(param_array == param_value)
try:
result2d[0][0]
except Exception as argument:
print('index_of_value_in_array Exception occurred: {0}'.format(argument))
return None
else:
tmp1 = result2d[0]
tmp2 = result2d[1]
return np.vstack([tmp1,tmp2]).T
elif array.ndim == 3:
result3d = np.where(param_array == param_value)
try:
result3d[0][0][0]
except Exception as argument:
print('index_of_value_in_array Exception occurred: {0}'.format(argument))
return None
else:
tmp1 = result3d[0]
tmp2 = result3d[1]
tmp3 = result3d[2]
return np.vstack([tmp1,tmp2,tmp3]).T
def func_broadcast_array(param_broadcaster, param_new_shape, param_broadcast_axis = BroadCastType.BC_VERTICAL):
clone = np.array(param_broadcaster)
output = np.array(param_broadcaster)
iteration = int(param_new_shape)
if param_broadcast_axis == BroadCastType.BC_VERTICAL:
for x in range(iteration - 1):
output = np.vstack([output, clone])
if output.shape[0] != iteration:
            print('func_broadcast_array experienced exception: {0} != {1} shape is {2}'.format(int(output.shape[0]), iteration, np.shape(output)))
input()
return output
elif param_broadcast_axis == BroadCastType.BC_HORIZONTAL:
for x in range(iteration - 1):
output = np.hstack([output, clone])
if output.shape[1] != iteration:
            print('func_broadcast_array experienced exception: {0} != {1} shape is {2}'.format(int(output.shape[1]), iteration, np.shape(output)))
input()
return output
def func_my_normalize(param_input):
row, col = param_input.shape
tmp = np.reshape(param_input, (1, row * col)).astype(np.float64)
normed_matrix = normalize(tmp.astype(np.float64), axis=1, norm='max')
normed_image = np.reshape(normed_matrix, param_input.shape)
max_value = np.amax(param_input)
return normed_image, max_value
def func_verify_image(param_input, threshold_1 = None, hough_thres = False):
try:
clone = param_input.copy()
x, y = clone.shape
for i in range(x):
for j in range(y):
if threshold_1 is None:
if clone[i][j] > 255:
clone[i][j] = 255
elif clone[i][j] < 0:
clone[i][j] = 0
else:
if clone[i][j] > threshold_1:
clone[i][j] = hough_thres and 1 or 255
elif clone[i][j] < threshold_1:
clone[i][j] = 0
except Exception as Argument:
print('func_verify_image exception occurred: {0}'.format(param_input))
input()
else:
return np.array(clone, dtype= np.uint8)
def func_add_noisy(image, noise_typ = 'gaussian', **kwargs):
mode = noise_typ.lower()
allowed_types = {
'gaussian': 'gaussian_values',
'laplacian': 'laplacian_values',
'local_var': 'local_var_values',
'poisson': 'poisson_values',
'salt': 'sp_values',
'pepper': 'sp_values',
's&p': 's&p_values',
'speckle': 'gaussian_values'}
kw_defaults = {
'mean': 0.,
'var': 10,
'exponential_decay' : 1.0,
'amount': 0.005,
'salt_vs_pepper': 0.5,
'local_vars': np.zeros_like(image) + 0.01}
allowed_kwargs = {
'gaussian_values': ['mean', 'var'],
'laplacian_values': ['mean', 'exponential_decay'],
'local_var_values': ['local_vars'],
'sp_values': ['amount', 'salt_vs_pepper'],
's&p_values': ['amount', 'salt_vs_pepper'],
'poisson_values': []}
#Check if the parameter is correct or not
for key in kwargs:
if key not in allowed_kwargs[allowed_types[mode]]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowed_kwargs[allowed_types[mode]]))
for kw in allowed_kwargs[allowed_types[mode]]:
kwargs.setdefault(kw, kw_defaults[kw])
#Normalize input image
row, col = image.shape
tmp = np.reshape(image, (1, row*col)).astype(np.float64)
normed_matrix = normalize(tmp.astype(np.float64), axis=1, norm='max')
normed_image = np.reshape(normed_matrix, image.shape)
max_value = np.amax(image)
max_value = 1
normed_image = image
#Process add noise to image according to type of noise
if noise_typ == 'gaussian':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
normed_image.shape)
func_compute_rms(noise)
noised_img = normed_image + noise
return func_verify_image(noised_img*max_value)
elif noise_typ == 'laplacian':
'''y = ln(2x) if x\in(0, 0.5) otherwise -ln(2-2x) if x\in(0.5, 1)
f(x; \mu, \lambda) = \frac{1}{2\lambda} \exp\left(-\frac{|x - \mu|}{\lambda}\right).
'''
noise = np.random.laplace(kwargs['mean'], kwargs['exponential_decay'], normed_image.shape)
noised_img = normed_image + noise;
return func_verify_image(noised_img*max_value)
elif noise_typ == 'salt':
out = normed_image
# Salt mode
num_salt = np.ceil(kwargs['amount'] * normed_image.size * kwargs['salt_vs_pepper'])
co_ordinates = [np.random.randint(0, i - 1, int(num_salt))
for i in normed_image.shape]
out[co_ordinates] = 1
return func_verify_image(out*max_value)
elif noise_typ == 'pepper':
# Pepper mode
out = normed_image
num_pepper = np.ceil(kwargs['amount'] * normed_image.size * (1. - kwargs['salt_vs_pepper']))
co_ordinates = [np.random.randint(0, i - 1, int(num_pepper))
for i in normed_image.shape]
out[co_ordinates] = 0
return func_verify_image(out*max_value)
elif noise_typ == "s&p":
out = normed_image
# Salt mode
num_salt = np.ceil(kwargs['amount'] * normed_image.size * kwargs['salt_vs_pepper'])
co_ordinates = [np.random.randint(0, i - 1, int(num_salt))
for i in normed_image.shape]
out[co_ordinates] = 255
# Pepper mode
num_pepper = np.ceil(kwargs['amount'] * normed_image.size * (1. - kwargs['salt_vs_pepper']))
co_ordinates = [np.random.randint(0, i - 1, int(num_pepper))
for i in normed_image.shape]
out[co_ordinates] = 0
return out * max_value
elif noise_typ == 'poisson':
values = len(np.unique(normed_image))
values = 2 ** np.ceil(np.log2(values))
noisy = np.random.poisson(normed_image * values) / float(values)
return func_verify_image(noisy*max_value)
elif noise_typ == 'speckle':
gauss = np.random.randn(row, col)
gauss = gauss.reshape(row, col)
noisy = normed_image + normed_image * gauss
return func_verify_image(noisy*max_value)
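# A minimal usage sketch (assumes `image` is a 2-D uint8 numpy array supplied
# by the caller; the noise types and keyword names are the ones handled in
# func_add_noisy above):
def _demo_add_noise(image):
    # Additive Gaussian noise with zero mean and an explicit variance of 10.
    gauss = func_add_noisy(image, 'gaussian', mean=0., var=10)
    # Salt & pepper noise corrupting ~0.5% of the pixels, half salt, half pepper.
    snp = func_add_noisy(image, 's&p', amount=0.005, salt_vs_pepper=0.5)
    return gauss, snp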
def func_manual_rotate_image_interpolation(param_input, param_angle, param_interpolation=0):
rows, cols = np.array(param_input).shape
ratio = (param_angle / 180) * np.pi
origin_center = np.array([np.round(rows / 2), np.round(cols / 2)])
new_row = np.int(np.abs(rows * np.cos(ratio)) + np.abs(cols * np.sin(ratio)))
new_col = np.int(np.abs(rows * np.sin(ratio)) + np.abs(cols * np.cos(ratio)))
output = np.zeros((new_row, new_col))
kernel = np.array([[np.cos(ratio), - np.sin(ratio)], [np.sin(ratio), np.cos(ratio)]])
output_center = np.array([np.round(new_row / 2), np.round(new_col / 2)])
if param_interpolation == 0: # Nearest interpolation
try:
for x in range(new_row):
for y in range(new_col):
vector_rotate = np.array([x - output_center[0], y - output_center[1]])
tmp = np.dot(kernel.T, vector_rotate)
rotate_momentum = np.array(tmp + origin_center, dtype=np.int)
if 0 < rotate_momentum[0] < rows and 0 < rotate_momentum[1] < cols:
output[x][y] = param_input[rotate_momentum[0]][rotate_momentum[1]]
except Exception as Argument:
print('func_manual_rotate_image_interpolation exception occurred: {0}'.format(Argument))
input()
else:
return output
elif param_interpolation == 1: # Bilinear interpolation
try:
for x in range(new_row):
for y in range(new_col):
vector_rotate = np.array([x - output_center[0], y - output_center[1]])
tmp = np.dot(kernel.T, vector_rotate)
rotate_momentum = tmp + origin_center
x1, y1 = np.floor(rotate_momentum).astype(dtype=np.int)
x2, y2 = np.ceil(rotate_momentum - np.floor(rotate_momentum)).astype(dtype=np.int)
if 0 < x1 < rows-1 and 0 < y1 < cols-1 :
output[x][y] = (1-x2)*(1-y2)*param_input[x1][y1] + (1-x2)*y2*param_input[x1][y1+1] + x2*(1-y2)*param_input[x1+1][y1] + x2*y2*param_input[x1+1][y1+1]
except Exception as Argument:
print('func_manual_rotate_image_interpolation exception occurred: {0}'.format(Argument))
input()
else:
return output
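# Usage sketch (hypothetical; `img` is any 2-D numpy image array): rotate by
# 30 degrees using bilinear interpolation (param_interpolation=1) or nearest
# neighbour (param_interpolation=0).
#   rotated = func_manual_rotate_image_interpolation(img, 30, param_interpolation=1)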
# This function visualizes filters in matrix A. Each column of A is a
# filter. We will reshape each column into a square image and visualize it
# on each cell of the visualization panel.
# All other parameters are optional, usually you do not need to worry
# about it.
# opt_normalize: whether we need to normalize the filter so that all of
# them can have similar contrast. Default value is true.
# opt_graycolor: whether we use gray as the heat map. Default is true.
# opt_colmajor: you can switch convention to row major for A. In that
# case, each row of A is a filter. Default value is false.
# source: https://github.com/tsaith/ufldl_tutorial
def display_network(A, m=-1, n=-1):
opt_normalize = True
opt_graycolor = True
# Rescale
A = A - np.average(A)
# Compute rows & cols
(row, col) = A.shape
sz = int(np.ceil(np.sqrt(row)))
buf = 1
if m < 0 or n < 0:
        n = int(np.ceil(np.sqrt(col)))
        m = int(np.ceil(col / n))
image = np.ones(shape=(buf + m * (sz + buf), buf + n * (sz + buf)))
if not opt_graycolor:
image *= 0.1
k = 0
for i in range(int(m)):
for j in range(int(n)):
if k >= col:
continue
clim = np.max(np.abs(A[:, k]))
if opt_normalize:
image[buf + i * (sz + buf):buf + i * (sz + buf) + sz, buf + j * (sz + buf):buf + j * (sz + buf) + sz] = \
A[:, k].reshape(sz, sz) / clim
else:
image[buf + i * (sz + buf):buf + i * (sz + buf) + sz, buf + j * (sz + buf):buf + j * (sz + buf) + sz] = \
A[:, k].reshape(sz, sz) / np.max(np.abs(A))
k += 1
return image
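# Usage sketch (hypothetical data; assumes matplotlib is available at call
# time): tile 100 random 8x8 "filters", one per column, into a single image.
#   import matplotlib.pyplot as plt
#   filters = np.random.randn(64, 100)
#   plt.imshow(display_network(filters), cmap='gray')
#   plt.show()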
def display_color_network(A):
"""
# display receptive field(s) or basis vector(s) for image patches
#
# A the basis, with patches as column vectors
# In case the midpoint is not set at 0, we shift it dynamically
:param A:
:param file:
:return:
"""
if np.min(A) >= 0:
A = A - np.mean(A)
    cols = int(np.round(np.sqrt(A.shape[1])))
    channel_size = A.shape[0] // 3
    dim = int(np.sqrt(channel_size))
    dimp = dim + 1
    rows = int(np.ceil(A.shape[1] / cols))
B = A[0:channel_size, :]
C = A[channel_size:2 * channel_size, :]
D = A[2 * channel_size:3 * channel_size, :]
B = B / np.max(np.abs(B))
C = C / np.max(np.abs(C))
D = D / np.max(np.abs(D))
# Initialization of the image
image = np.ones(shape=(dim * rows + rows - 1, dim * cols + cols - 1, 3))
for i in range(int(rows)):
for j in range(int(cols)):
# This sets the patch
image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 0] = B[:, i * cols + j].reshape(dim, dim)
image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 1] = C[:, i * cols + j].reshape(dim, dim)
image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 2] = D[:, i * cols + j].reshape(dim, dim)
image = (image + 1) / 2
# PIL.Image.fromarray(np.uint8(image * 255), 'RGB').save(filename)
return image | gpl-3.0 |
drphilmarshall/Pangloss | doc/pgm/pgm_color.py | 2 | 3330 | # ============================================================================
# PGM for the SDSI proposal
#
# Phil Marshall, September 2014
# ============================================================================
from matplotlib import rc
rc("font", family="serif", size=10)
rc("text", usetex=True)
import daft
figshape = (8.6, 5.0)
figorigin = (-0.6, -0.4)
# Colors.
red = {"ec": "red"}
orange = {"ec": "orange"}
green = {"ec": "green"}
blue = {"ec": "blue"}
violet = {"ec": "violet"}
# Start the chart:
pgm = daft.PGM(figshape, origin=figorigin)
# Foreground galaxies branch:
pgm.add_node(daft.Node("cosmo1", r"${\bf \Omega}_g$", 1.9, 3.8,plot_params=violet))
pgm.add_node(daft.Node("Mhd", r"$M_{{\rm h},i}$", 1.3, 2.2,plot_params=green))
pgm.add_node(daft.Node("zd", r"$z_i$", 2.2, 2.8,plot_params=green))
pgm.add_node(daft.Node("xd", r"${\bf x}_i$", 2.2, 0.7, fixed=True))
pgm.add_node(daft.Node("Mstard", r"$M^{*}_i$", 2.2, 1.6,plot_params=orange))
pgm.add_node(daft.Node("phid", r"${\bf \phi}_i$", 1.3, 1.1, observed=True))
pgm.add_node(daft.Node("alphad", r"$\alpha$", 0.2, 1.6,plot_params=red))
# Background galaxies:
pgm.add_node(daft.Node("cosmo2", r"${\bf \Omega}_e$", 3.0, 4.0,plot_params=violet))
pgm.add_node(daft.Node("sigcrit", r"$\Sigma^{\rm crit}_{ij}$", 3.0, 2.4,plot_params=blue))
pgm.add_node(daft.Node("gamma", r"$\gamma_j$", 4.0, 1.8,plot_params=blue))
pgm.add_node(daft.Node("elens", r"$\epsilon^{\rm lens}_j$", 4.5, 1.2,plot_params=blue))
pgm.add_node(daft.Node("eobs", r"$\epsilon^{\rm obs}_j$", 4.5, 0.4, observed=True))
pgm.add_node(daft.Node("Mh", r"$M_{{\rm h},j}$", 6.0, 3.0,plot_params=green))
pgm.add_node(daft.Node("z", r"$z_j$", 5.0, 3.0,plot_params=green))
pgm.add_node(daft.Node("x", r"${\bf x}_j$", 4.6, 2.4, fixed=True))
pgm.add_node(daft.Node("cosmo3", r"${\bf \Omega}_g$", 5.6, 4.0,plot_params=violet))
pgm.add_node(daft.Node("Mstar", r"$M^{*}_j$", 5.6, 2.4,plot_params=orange))
pgm.add_node(daft.Node("alpha", r"${\bf \alpha}$", 7.2, 2.4,plot_params=red))
pgm.add_node(daft.Node("epsilon", r"$\epsilon_j$", 6.0, 1.4))
pgm.add_node(daft.Node("phi", r"${\bf \phi}_j$", 5.0, 1.8, observed=True))
# Now connect the dots:
pgm.add_edge("cosmo1", "Mhd")
pgm.add_edge("cosmo1", "zd")
pgm.add_edge("Mhd", "Mstard")
pgm.add_edge("zd", "Mstard")
pgm.add_edge("alphad", "Mstard")
pgm.add_edge("zd", "phid")
pgm.add_edge("Mstard", "phid")
pgm.add_edge("zd", "sigcrit")
pgm.add_edge("cosmo2", "sigcrit")
pgm.add_edge("z", "sigcrit")
pgm.add_edge("xd", "gamma")
pgm.add_edge("Mhd", "gamma")
pgm.add_edge("sigcrit", "gamma")
pgm.add_edge("x", "gamma")
pgm.add_edge("gamma", "elens")
pgm.add_edge("elens", "eobs")
pgm.add_edge("cosmo3", "Mh")
pgm.add_edge("cosmo3", "z")
pgm.add_edge("Mh", "Mstar")
pgm.add_edge("z", "Mstar")
pgm.add_edge("alpha", "Mstar")
pgm.add_edge("Mstar", "epsilon")
pgm.add_edge("Mstar", "phi")
pgm.add_edge("z", "phi")
pgm.add_edge("epsilon", "elens")
#Add plate over deflectors
pgm.add_plate(daft.Plate([0.7, 0.2, 2.7, 3.2], label=r"foreground galaxies $i$", label_offset=[5, 5]))
# Add plate over sources
pgm.add_plate(daft.Plate([2.6, 0.0, 4.0, 3.6], label=r"background galaxies $j$", label_offset=[120, 5]))
pgm.render()
pgm.figure.savefig("pgm_color.png", dpi=220)
# ============================================================================
| gpl-2.0 |
drscotthawley/audio-classifier-keras-cnn | eval_network.py | 1 | 10089 | from __future__ import print_function
'''
Classify sounds using database - evaluation code
Author: Scott H. Hawley
This is kind of a mixture of Keun Woo Choi's code https://github.com/keunwoochoi/music-auto_tagging-keras
and the MNIST classifier at https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
Trained using Fraunhofer IDMT's database of monophonic guitar effects,
clips were 2 seconds long, sampled at 44100 Hz
'''
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
from keras.models import Sequential, Model
from keras.layers import Input, Dense, TimeDistributed, LSTM, Dropout, Activation
from keras.layers import Convolution2D, MaxPooling2D, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU
from keras.callbacks import ModelCheckpoint
from keras import backend
from keras.utils import np_utils
import os
from os.path import isfile
from sklearn.metrics import roc_auc_score
from timeit import default_timer as timer
from sklearn.metrics import roc_auc_score, roc_curve, auc
mono=True
def get_class_names(path="Preproc/"): # class names are subdirectory names in Preproc/ directory
class_names = os.listdir(path)
return class_names
def get_total_files(path="Preproc/",train_percentage=0.8):
sum_total = 0
sum_train = 0
sum_test = 0
subdirs = os.listdir(path)
for subdir in subdirs:
files = os.listdir(path+subdir)
n_files = len(files)
sum_total += n_files
n_train = int(train_percentage*n_files)
n_test = n_files - n_train
sum_train += n_train
sum_test += n_test
return sum_total, sum_train, sum_test
def get_sample_dimensions(path='Preproc/'):
classname = os.listdir(path)[0]
files = os.listdir(path+classname)
infilename = files[0]
audio_path = path + classname + '/' + infilename
melgram = np.load(audio_path)
print(" get_sample_dimensions: melgram.shape = ",melgram.shape)
return melgram.shape
def encode_class(class_name, class_names): # makes a "one-hot" vector for each class name called
try:
idx = class_names.index(class_name)
vec = np.zeros(len(class_names))
vec[idx] = 1
return vec
except ValueError:
return None
def decode_class(vec, class_names): # generates a number from the one-hot vector
return int(np.argmax(vec))
def shuffle_XY_paths(X,Y,paths): # generates a randomized order, keeping X&Y(&paths) together
assert (X.shape[0] == Y.shape[0] )
idx = np.array(range(Y.shape[0]))
np.random.shuffle(idx)
newX = np.copy(X)
newY = np.copy(Y)
newpaths = paths
for i in range(len(idx)):
newX[i] = X[idx[i],:,:]
newY[i] = Y[idx[i],:]
newpaths[i] = paths[idx[i]]
return newX, newY, newpaths
def build_datasets(train_percentage=0.8, preproc=False):
'''
So we make the training & testing datasets here, and we do it separately.
Why not just make one big dataset, shuffle, and then split into train & test?
because we want to make sure statistics in training & testing are as similar as possible
'''
if (preproc):
path = "Preproc/"
else:
path = "Samples/"
class_names = get_class_names(path=path)
print("class_names = ",class_names)
total_files, total_train, total_test = get_total_files(path=path, train_percentage=train_percentage)
print("total files = ",total_files)
nb_classes = len(class_names)
mel_dims = get_sample_dimensions(path=path)
# pre-allocate memory for speed (old method used np.concatenate, slow)
X_train = np.zeros((total_train, mel_dims[1], mel_dims[2], mel_dims[3]))
Y_train = np.zeros((total_train, nb_classes))
X_test = np.zeros((total_test, mel_dims[1], mel_dims[2], mel_dims[3]))
Y_test = np.zeros((total_test, nb_classes))
paths_train = []
paths_test = []
train_count = 0
test_count = 0
for idx, classname in enumerate(class_names):
this_Y = np.array(encode_class(classname,class_names) )
this_Y = this_Y[np.newaxis,:]
class_files = os.listdir(path+classname)
n_files = len(class_files)
n_load = n_files
n_train = int(train_percentage * n_load)
printevery = 100
print("")
for idx2, infilename in enumerate(class_files[0:n_load]):
audio_path = path + classname + '/' + infilename
if (0 == idx2 % printevery):
print('\r Loading class: {:14s} ({:2d} of {:2d} classes)'.format(classname,idx+1,nb_classes),
", file ",idx2+1," of ",n_load,": ",audio_path,sep="")
#start = timer()
if (preproc):
melgram = np.load(audio_path)
sr = 44100
else:
aud, sr = librosa.load(audio_path, mono=mono,sr=None)
melgram = librosa.logamplitude(librosa.feature.melspectrogram(aud, sr=sr, n_mels=96),ref_power=1.0)[np.newaxis,np.newaxis,:,:]
#end = timer()
#print("time = ",end - start)
            melgram = melgram[:,:,:,0:mel_dims[3]] # just in case files are different sizes: clip to first file size
if (idx2 < n_train):
# concatenate is SLOW for big datasets; use pre-allocated instead
#X_train = np.concatenate((X_train, melgram), axis=0)
#Y_train = np.concatenate((Y_train, this_Y), axis=0)
X_train[train_count,:,:] = melgram
Y_train[train_count,:] = this_Y
paths_train.append(audio_path) # list-appending is still fast. (??)
train_count += 1
else:
X_test[test_count,:,:] = melgram
Y_test[test_count,:] = this_Y
#X_test = np.concatenate((X_test, melgram), axis=0)
#Y_test = np.concatenate((Y_test, this_Y), axis=0)
paths_test.append(audio_path)
test_count += 1
print("")
print("Shuffling order of data...")
X_train, Y_train, paths_train = shuffle_XY_paths(X_train, Y_train, paths_train)
X_test, Y_test, paths_test = shuffle_XY_paths(X_test, Y_test, paths_test)
return X_train, Y_train, paths_train, X_test, Y_test, paths_test, class_names, sr
def build_model(X,Y,nb_classes):
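    # Builds a small ConvNet with the Keras 1.x API: an initial 3x3 convolution with
    # batch normalization and ReLU, followed by nb_layers-1 blocks of
    # [3x3 conv, batch norm, ELU, 2x2 max pooling, 25% dropout], then
    # Flatten -> Dense(128) + ReLU + 50% dropout -> Dense(nb_classes) with softmax.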
nb_filters = 32 # number of convolutional filters to use
pool_size = (2, 2) # size of pooling area for max pooling
kernel_size = (3, 3) # convolution kernel size
nb_layers = 4
input_shape = (1, X.shape[2], X.shape[3])
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid', input_shape=input_shape))
model.add(BatchNormalization(axis=1, mode=2))
model.add(Activation('relu'))
for layer in range(nb_layers-1):
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(BatchNormalization(axis=1, mode=2))
model.add(ELU(alpha=1.0))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation("softmax"))
return model
if __name__ == '__main__':
np.random.seed(1)
# get the data
X_train, Y_train, paths_train, X_test, Y_test, paths_test, class_names, sr = build_datasets(preproc=True)
# make the model
model = build_model(X_train,Y_train, nb_classes=len(class_names))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
model.summary()
# Initialize weights using checkpoint if it exists. (Checkpointing requires h5py)
checkpoint_filepath = 'weights.hdf5'
if (True):
print("Looking for previous weights...")
if ( isfile(checkpoint_filepath) ):
print ('Checkpoint file detected. Loading weights.')
model.load_weights(checkpoint_filepath)
else:
print ('No checkpoint file detected. You gotta train_network first.')
exit(1)
else:
print('Starting from scratch (no checkpoint)')
print("class names = ",class_names)
batch_size = 128
num_pred = X_test.shape[0]
# evaluate the model
print("Running model.evaluate...")
scores = model.evaluate(X_test, Y_test, verbose=1, batch_size=batch_size)
print('Test score:', scores[0])
print('Test accuracy:', scores[1])
print("Running predict_proba...")
y_scores = model.predict_proba(X_test[0:num_pred,:,:,:],batch_size=batch_size)
auc_score = roc_auc_score(Y_test, y_scores)
print("AUC = ",auc_score)
n_classes = len(class_names)
print(" Counting mistakes ")
mistakes = np.zeros(n_classes)
for i in range(Y_test.shape[0]):
pred = decode_class(y_scores[i],class_names)
true = decode_class(Y_test[i],class_names)
if (pred != true):
mistakes[true] += 1
mistakes_sum = int(np.sum(mistakes))
print(" Found",mistakes_sum,"mistakes out of",Y_test.shape[0],"attempts")
print(" Mistakes by class: ",mistakes)
print("Generating ROC curves...")
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(Y_test[:, i], y_scores[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
plt.figure()
lw = 2
for i in range(n_classes):
plt.plot(fpr[i], tpr[i],
lw=lw, label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| mit |
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_simulate_raw_data.py | 6 | 2822 | """
===========================
Generate simulated raw data
===========================
This example generates raw data by repeating a desired source
activation multiple times.
"""
# Authors: Yousra Bekhti <[email protected]>
# Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import read_source_spaces, find_events, Epochs, compute_covariance
from mne.datasets import sample
from mne.simulation import simulate_sparse_stc, simulate_raw
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
src_fname = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
bem_fname = (data_path +
'/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif')
# Load real data as the template
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
raw = raw.crop(0., 30.) # 30 sec is enough
##############################################################################
# Generate dipole time series
n_dipoles = 4 # number of dipoles to create
epoch_duration = 2. # duration of each epoch/event
n = 0 # harmonic number
def data_fun(times):
"""Generate time-staggered sinusoids at harmonics of 10Hz"""
global n
n_samp = len(times)
window = np.zeros(n_samp)
start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
for ii in (2 * n, 2 * n + 1)]
window[start:stop] = 1.
n += 1
data = 25e-9 * np.sin(2. * np.pi * 10. * n * times)
data *= window
return data
times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
src = read_source_spaces(src_fname)
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
data_fun=data_fun, random_state=0)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
mne.viz.utils.plt_show()
##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw, stc, trans_fname, src, bem_fname, cov='simple',
iir_filter=[0.2, -0.2, 0.04], ecg=True, blink=True,
n_jobs=1, verbose=True)
raw_sim.plot()
##############################################################################
# Plot evoked data
events = find_events(raw_sim) # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, -0.2, epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical',
verbose='error') # quick calc
evoked = epochs.average()
evoked.plot_white(cov, time_unit='s')
| bsd-3-clause |
VandroiyLabs/FaroresWind | faroreswind/client/client.py | 1 | 1088 | ## System libraries
import json
## numerical libraries
import numpy as np
## plot
import matplotlib
#matplotlib.use('Agg')
import pylab as pl
## gpg library
import gnupg
## URL library
import urllib2
class client:
def __init__(self, config='client_config'):
self.parseConfig( config )
return
def parseConfig(self, config):
# Opening the configuration file
self.config = json.loads( open(config, 'r').read() )
return
def retrieveData(self, datei, timei, datef, timef, enose):
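        # Build the query URL for the configured host, fetch the GPG-encrypted
        # response, decrypt it with the local keyring and passphrase from the
        # config file, and return the decoded JSON payload as a numpy array.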
url = self.config['host'] + "/serveTimeSeries?"
url += "datei=" + datei + "&timei=" + timei
url += "&datef=" + datef + "&timef=" + timef
url += "&enose=" + str(enose) + "&k=" + self.config['gpg_keyid']
msg = urllib2.urlopen(url).read()
gpg = gnupg.GPG(verbose = True, homedir=self.config['gpg_homedir'])
jsonDump = json.loads( str( gpg.decrypt(msg, passphrase=self.config['gpg_passphrase']) ) )
return np.array(jsonDump)
    def retrieveMetadata(self):
return
| gpl-3.0 |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/metrics/cluster/tests/test_unsupervised.py | 66 | 5806 | import numpy as np
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import silhouette_samples
from sklearn.metrics import pairwise_distances
from sklearn.metrics.cluster import calinski_harabaz_score
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X_dense = dataset.data
X_csr = csr_matrix(X_dense)
X_dok = sp.dok_matrix(X_dense)
X_lil = sp.lil_matrix(X_dense)
y = dataset.target
for X in [X_dense, X_csr, X_dok, X_lil]:
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
score_precomputed = silhouette_score(D, y, metric='precomputed')
assert_greater(score_precomputed, 0)
# Test without calculating D
score_euclidean = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(score_precomputed, score_euclidean)
if X is X_dense:
score_dense_without_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean,
score_dense_without_sampling)
# Test with sampling
score_precomputed = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
score_euclidean = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert_greater(score_precomputed, 0)
assert_greater(score_euclidean, 0)
assert_almost_equal(score_euclidean, score_precomputed)
if X is X_dense:
score_dense_with_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean, score_dense_with_sampling)
def test_cluster_size_1():
# Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster
# (cluster 0). We also test the case where there are identical samples
# as the only members of a cluster (cluster 2). To our knowledge, this case
# is not discussed in reference material, and we choose for it a sample
# score of 1.
X = [[0.], [1.], [1.], [2.], [3.], [3.]]
labels = np.array([0, 1, 1, 1, 2, 2])
# Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention
# Cluster 1: intra-cluster = [.5, .5, 1]
# inter-cluster = [1, 1, 1]
# silhouette = [.5, .5, 0]
# Cluster 2: intra-cluster = [0, 0]
# inter-cluster = [arbitrary, arbitrary]
# silhouette = [1., 1.]
silhouette = silhouette_score(X, labels)
assert_false(np.isnan(silhouette))
ss = silhouette_samples(X, labels)
assert_array_equal(ss, [0, .5, .5, 0, 1, 1])
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
def test_non_encoded_labels():
dataset = datasets.load_iris()
X = dataset.data
labels = dataset.target
assert_equal(
silhouette_score(X, labels * 2 + 10), silhouette_score(X, labels))
assert_array_equal(
silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels))
def test_non_numpy_labels():
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
assert_equal(
silhouette_score(list(X), list(y)), silhouette_score(X, y))
def test_calinski_harabaz_score():
rng = np.random.RandomState(seed=0)
# Assert message when there is only one label
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.zeros(10))
# Assert message when all point are in different clusters
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.arange(10))
# Assert the value is 1. when all samples are equals
assert_equal(1., calinski_harabaz_score(np.ones((10, 2)),
[0] * 5 + [1] * 5))
# Assert the value is 0. when all the mean cluster are equal
assert_equal(0., calinski_harabaz_score([[-1, -1], [1, 1]] * 10,
[0] * 10 + [1] * 10))
# General case (with non numpy arrays)
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
[[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
assert_almost_equal(calinski_harabaz_score(X, labels),
45 * (40 - 4) / (5 * (4 - 1)))
| bsd-3-clause |
tomlof/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
John-Keating/ThinkStats2 | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
def Correlations(df):
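    # Print covariance and correlation of height (htm3) and weight (wtkg2),
    # comparing the pandas implementations against thinkstats2, including
    # Spearman's rank correlation and Pearson correlation with log(weight).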
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
print('thinkstats2 SpearmanCorr log wtkg3',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
print('thinkstats2 Corr log wtkg3',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
return
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
fallisd/validate | validate/plot_iterator.py | 1 | 5384 | """
plot_iterator
===============
This module contains functions needed to efficiently loop
through all of the specified plots that will be produced.
.. moduleauthor:: David Fallis
"""
import os
import glob
import defaults as dft
import plot_cases as pc
import matplotlib.pyplot as plt
from yamllog import log
from copy import deepcopy
DEBUGGING = False
def single(plot):
""" Calls the appropriate functions to output the plot
"""
def pregion_standard(pl):
return {'global_map': pc.colormap,
'section': pc.section,
'polar_map': pc.colormap,
'polar_map_south': pc.colormap,
'mercator': pc.colormap,
}[pl]
func_case = pregion_standard(plot['plot_projection'])
return func_case(plot)
def compare(plot):
""" Calls the appropriate functions to output the plot
"""
def pregion_comp(pl):
return {'global_map': pc.colormap_comparison,
'section': pc.section_comparison,
'polar_map': pc.colormap_comparison,
'polar_map_south': pc.colormap_comparison,
'mercator': pc.colormap_comparison,
'time_series': pc.timeseries,
'histogram': pc.histogram,
'zonal_mean': pc.zonalmean,
'taylor': pc.taylor,
'multivariable_taylor': pc.multivariable_taylor,
'scatter': pc.scatter,
}[pl]
func_case = pregion_comp(plot['plot_projection'])
return func_case(plot)
def _remove_plots():
""" Removes old plots
"""
plots_out = []
old_plots = glob.glob('plots/*.pdf')
for f in old_plots:
os.remove(f)
old_plots = glob.glob('plots/*.png')
for f in old_plots:
os.remove(f)
def makeplot(p, plotnames, func):
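    # Attempt to render the plot described by dictionary p with plotting function
    # func ('single' or 'compare'); failures are appended to logs/log.txt, while
    # successful plots are logged via log() and, for pdf output, added to plotnames.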
p['plot_type'] = func.__name__
try:
plot_name = func(p)
except:
with open('logs/log.txt', 'a') as outfile:
outfile.write('Failed to plot ' + p['variable'] + ', ' + p['plot_projection'] + ', ' + p['data_type'] + ', ' + p['comp_model'] + '\n\n')
else:
p['plot_name'] = plot_name + '.pdf'
p['png_name'] = plot_name + '.png'
if p['pdf']:
plotnames.append(dict(p))
log(p)
if p['png']:
p['plot_name'] = p['png_name']
log(p)
with open('logs/log.txt', 'a') as outfile:
outfile.write('Successfully plotted ' + p['variable'] + ', ' + p['plot_projection'] + ', ' + p['plot_type'] + ', ' + p['comp_model'] + '\n\n')
def makeplot_without_catching(p, plotnames, func):
p['plot_type'] = func.__name__
plot_name = func(p)
p['plot_name'] = plot_name + '.pdf'
p['png_name'] = plot_name + '.png'
if p['pdf']:
plotnames.append(dict(p))
log(p)
if p['png']:
p['plot_name'] = p['png_name']
log(p)
def calltheplot(plot, plotnames, ptype):
funcs = {'single': single,
'compare': compare,
}
if DEBUGGING:
makeplot_without_catching(plot, plotnames, funcs[ptype])
else:
makeplot(plot, plotnames, funcs[ptype])
def comp_loop(plot, plotnames, ptype):
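    # Loop over all requested comparisons for a single plot: observations,
    # the CMIP5 ensemble, other models and individual run IDs. For each one,
    # set comp_flag/comp_model/comp_file on the plot dictionary and call
    # calltheplot().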
plot['comp_flag'] = 'obs'
for o in plot['comp_obs']:
plot['comp_model'] = o
plot['comp_file'] = plot['obs_file'][o]
calltheplot(plot, plotnames, ptype)
plot['comp_flag'] = 'cmip5'
if plot['comp_cmips']:
plot['comp_model'] = 'cmip5'
plot['comp_file'] = plot['cmip5_file']
calltheplot(plot, plotnames, ptype)
plot['comp_flag'] = 'model'
for model in plot['comp_models']:
plot['comp_model'] = model
plot['comp_file'] = plot['model_file'][model]
calltheplot(plot, plotnames, ptype)
plot['comp_flag'] = 'runid'
for i in plot['id_file']:
plot['comp_model'] = i
plot['comp_file'] = plot['id_file'][i]
calltheplot(plot, plotnames, ptype)
def loop_plot_types(plot, plotnames):
    if plot['plot_projection'] in ('time_series', 'zonal_mean', 'taylor',
                                   'histogram', 'scatter', 'multivariable_taylor'):
plot['comp_model'] = 'Model'
calltheplot(plot, plotnames, 'compare')
else:
plot['comp_model'] = 'Model'
calltheplot(plot, plotnames, 'single')
comp_loop(plot, plotnames, 'compare')
def loop(plots, debug):
""" Loops though the list of plots and the depths within
the plots and outputs each to a pdf
Parameters
----------
plots : list of dictionaries
Returns
-------
    list of plot dictionaries, one per successfully produced plot
"""
global DEBUGGING
DEBUGGING = debug
# Remove old plots
_remove_plots()
plotnames = []
for p in plots:
if p['depths'] == [""]:
p['is_depth'] = False
else:
p['is_depth'] = True
if p['plot_projection'] == 'taylor':
loop_plot_types(p, plotnames)
continue
for d in p['depths']:
try:
p['depth'] = int(d)
except: pass
loop_plot_types(p, plotnames)
plt.close('all')
return plotnames
if __name__ == "__main__":
pass
| gpl-2.0 |
dhuppenkothen/UTools | mle.py | 1 | 55419 | #### THIS WILL DO THE MAXIMUM LIKELIHOOD FITTING
#
# and assorted other things related to that
#
# It's kind of awful code.
#
# Separate classes for
# - periodograms (distributed as chi^2_2 or chi^2_2m, for averaged periodograms)
# - light curves (not really accurate, use scipy.optimize.curve_fit if you can)
# - Gaussian Processes (for MAP estimates of GPs)
#
# Note: This script has grown over three years. It's not very optimised and doesn't
# necessarily make sense to someone who is not me. Continue reading at your own peril.
#
#
#
#!/usr/bin/env python
#import matplotlib
#matplotlib.use("Agg")
import matplotlib.pyplot as plt
from pylab import *
#### GENERAL IMPORTS ###
import os
import numpy as np
import scipy
import scipy.optimize
import scipy.stats
import scipy.signal
import math
import copy
#from scikits.statsmodels.sandbox.regression.numdiff import approx_hess3 as approx_hess
from statsmodels.tools.numdiff import approx_hess
### own imports
import generaltools as gt
import posterior
import powerspectrum
### global variables ####
logmin = -100.0
#### AUXILIARY FUNCTIONS ############################################
#### MAKE A SIMPLE LINEAR FUNCTION
#
# f(x) = a*x + b
# a = slope
# b = intercept
#
#
def straight(freq, a, b):
return (a*np.array(freq) + b)
def sigmoid(z, a,b,c,d):
return a*z + b/(c + np.exp(-d*z))
def const(freq, a):
return np.array([np.exp(a) for x in freq])
#### FAST-RISE EXPONENTIAL DECAY (FRED) PROFILE
#
# useful for light curves, not strictly for MLE fitting
#
# f(x) = a * exp(2*(t1/t2)**(1/2)) * exp(-t1/(time-ts) - (time-ts)/t2)
# a = normalization
# ts = pulse start time
# t1 = characteristic of burst rise
# t2 = characteristic of burst decay
def fred(time, a, tau1, tau2, c = 0.0, t0 = 0.5):
#print("a: " + str(a))
#print("tau1: " + str(tau1))
#print("tau2: " + str(tau2))
#print("len time: " + str(len(time)))
tdiff = time - (time[0]) - t0
#print("len tdiff: " + str(len(tdiff)))
dt = tdiff[1]-tdiff[0]
ts = stepfun(time, time[0]+t0+dt, max(time)+dt)
#tdiff = time - t0
#print("tdiff: " + str(tdiff[0]))
e1 = (-tau1/(tdiff)) - ((tdiff)/tau2)
e2 = (np.exp(2.0*(tau1/tau2)**0.5))
    counts = a*np.exp(e1)*e2*ts + c
cmod = []
for co in counts:
        if np.isnan(co) or np.isinf(co):
cmod.append(c)
else:
cmod.append(co)
#print('funcval in FRED: ' + str(counts))
return cmod
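# Illustrative sketch (not part of the original code; parameter values are
# arbitrary): evaluate a single FRED pulse on a uniform time grid. The pulse
# rises shortly after t0 and decays on timescale tau2; samples before the
# onset are set to the constant background c.
#
#   t = np.linspace(0.0, 10.0, 1000)
#   pulse = fred(t, a=10.0, tau1=0.5, tau2=2.0, c=1.0, t0=0.5)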
def envelope(x, tstart, tend, trise, tfall, amplitude):
rexp = -trise/(x - tstart)
fexp = tfall/(x - tend)
res = amplitude*np.exp(rexp + fexp)
#xsind = np.array(x).searchsorted(tstart)-1
#xeind = np.array(x).searchsorted(tend)+1
#xnew = x[xsind:xeind]
#xnew = xnew[1:] - xnew[0]
#rexp = -trise/(xnew)
#fexp = tfall/(xnew)
#resb = amplitude*np.exp(rexp + fexp)
#res = np.zeros(len(x))
#res[xsind+1:xeind] = resb
return res
def combfred(x, norm1, norm2, norm3, norm4, tau11, tau12, tau13, tau14, tau21, tau22, tau23, tau24, t01, t02, t03, t04, c):
fred1 = fred(x, norm1, tau11, tau21, 1.0, t01)
fred2 = fred(x, norm2, tau12, tau22, 1.0, t02)
fred3 = fred(x, norm3, tau13, tau23, 1.0, t03)
fred4 = fred(x, norm4, tau14, tau24, 1.0, t04)
fredsum = np.array(np.array(fred1) + np.array(fred2) + np.array(fred3) + np.array(fred4))# + c
return fredsum
def stepfun(x, a, b):
y = np.zeros(len(x))
x = np.array(x)
minind = x.searchsorted(a)
maxind = x.searchsorted(b)
y[minind:maxind] = 1.0
return y
### LORENTZIAN PROFILE ######################
#
# Lorentzian Profile
#
# gamma: width parameter
# norm: normalization
# x0: location of centroid
def lorentzian(x, gamma, norm, x0):
l = norm*(1.0/np.pi)*0.5*gamma/((x-x0)**2.0 + (0.5*gamma)**2.0)
return l
def gaussian(x, mean, scale, norm, c=0.0):
norm = np.exp(norm)
c = np.exp(c)
g = norm*np.exp(-(x-mean)**2.0/(2.0*scale**2.0)) + c
return g
#### POWER LAW ########
#
# f(x) = b*x**a + c
# a = power law index
# b = normalization (LOG)
# c = white noise level (LOG), optional
#
def pl(freq, a, b, c=None):
res = -a*np.log(freq) + b
    if c is not None:
return (np.exp(res) + np.exp(c))
else:
return np.exp(res)
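# Worked example (illustrative only; arbitrary parameter values): a power law
# with index a=2, log-normalization b=0 and log-noise level c=log(2), i.e. a
# flat noise level of 2:
#
#   freq = np.array([1.0, 2.0, 4.0])
#   pl(freq, 2.0, 0.0, np.log(2.0))   # -> [3.0, 2.25, 2.0625]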
### POWER LAW DERIVATIVE
# Derivative of the likelihood function for a power law profile
# Probably not correct.
#
# DON'T USE!
#
def ml_pl_prime(freq, a, b, c):
aprime = -(b*np.log(freq)*((c-1.0)*(freq**a) + b))/(c*freq**-a + b)**2.0
bprime = ((c-1.0)*freq**a + b)/(c*freq**a + b)**2.0
cprime = ((freq**2)*((c-1)*(freq**2.0)*b))/(c*freq**2.0 + b)**2.0
return aprime, bprime, cprime
#### Lorentzian Profile for QPOs
#
# f(x) = (a*b/(2*pi))/((x-c)**2 + a**2)
#
# a = full width half maximum
# b = log(normalization)
# c = centroid frequency
# d = log(noise level) (optional)
#
def qpo(freq, a, b, c, d=None):
gamma = np.exp(a)
norm = np.exp(b)
nu0 = c
alpha = norm*gamma/(math.pi*2.0)
y = alpha/((freq - nu0)**2.0 + gamma**2.0)
    if d is not None:
y = y + np.exp(d)
return y
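# Illustrative sketch (arbitrary values): a Lorentzian QPO centred on
# nu0 = c = 5 Hz with width gamma = exp(a) and integrated normalization
# norm = exp(b); at the centroid the profile peaks at norm/(2*pi*gamma).
#
#   freq = np.linspace(0.1, 10.0, 1000)
#   qpo_model = qpo(freq, a=np.log(0.5), b=np.log(10.0), c=5.0)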
### auxiliary function that makes a Lorentzian with a fixed centroid frequency
### needed for QPO search algorithm
def make_lorentzians(x):
### loop creates many function definitions lorentz, each differs only by the value
### of the centroid frequency f used in computing the spectrum
for f in x:
def create_my_func(f):
def lorentz(x, a, b, e):
result = qpo(x, a, b, f, e)
return result
return lorentz
yield(create_my_func(f))
def plqpo(freq, plind, beta, noise,a, b, c, d=None):
#def plqpo(freq, plind, beta, noise,a, b, d=None):
#c = 93.0943061934
powerlaw = pl(freq, plind, beta, noise)
quasiper = qpo(freq, a, b, c, d)
return powerlaw+quasiper
def bplqpo(freq, lplind, beta, hplind, fbreak, noise, a, b, c, d=None):
#def bplqpo(freq, lplind, beta, hplind, fbreak, noise, a, b,d=None):
#c = 93.0943061934
powerlaw = bpl(freq, lplind, beta, hplind, fbreak, noise)
quasiper = qpo(freq, a, b, c, d)
return powerlaw+quasiper
### BENT POWER LAW
# f(x) = (a[1]*x**a[0])/(1.0 + (x/a[3])**(a[2]-a[0]))+a[4])
# a = low-frequency index, usually between 0 and 1
# b = log(normalization)
# c = high-frequency index, usually between 1 and 4
# d = log(frequency where model bends)
# e = log(white noise level) (optional)
# f = smoothness parameter
#def bpl(freq, a, b, c, d, f=-1.0, e=None):
def bpl(freq, a, b, c, d, e=None):
### compute bending factor
logz = (c - a)*(np.log(freq) - d)
### be careful with very large or very small values
logqsum = sum(np.where(logz<-100, 1.0, 0.0))
if logqsum > 0.0:
logq = np.where(logz<-100, 1.0, logz)
else:
logq = logz
logqsum = np.sum(np.where((-100<=logz) & (logz<=100.0), np.log(1.0 + np.exp(logz)), 0.0))
if logqsum > 0.0:
logqnew = np.where((-100<=logz) & (logz<=100.0), np.log(1.0 + np.exp(logz)), logq)
else:
logqnew = logq
logy = -a*np.log(freq) - logqnew + b
# logy = -a*np.log(freq) + f*logqnew + b
    if e is not None:
y = np.exp(logy) + np.exp(e)
else:
y = np.exp(logy)
return y
# f(x) = (a[1]*x**a[0])/(1.0 + (x/a[3])**(a[2]-a[0]))+a[4])
# a = low-frequency index, usually between 0 and 1
# b = log(normalization)
# c = high-frequency index, usually between 1 and 4
# d = log(frequency where model bends)
# e = log(white noise level) (optional)
# f = smoothness parameter
#def bpl(freq, a, b, c, d, f=-1.0, e=None):
def bpl2(freq, a, b, c, d, e=None):
### compute bending factor
logz = (c - a)*(np.log(freq) - d)
### be careful with very large or very small values
logqsum = sum(np.where(logz<-100, 1.0, 0.0))
if logqsum > 0.0:
logq = np.where(logz<-100, 1.0, logz)
else:
logq = logz
logqsum = np.sum(np.where((-100<=logz) & (logz<=100.0), np.log(1.0 + np.exp(logz)), 0.0))
if logqsum > 0.0:
logqnew = np.where((-100<=logz) & (logz<=100.0), np.log(1.0 + np.exp(logz)), logq)
else:
logqnew = logq
logy = -a*np.log(freq) + logqnew + b
# logy = -a*np.log(freq) + f*logqnew + b
    if e is not None:
y = np.exp(logy) + np.exp(e)
else:
y = np.exp(logy)
return y
### FIT CONSTRAINED BENT POWER LAW
# f(x) = (a[1]*x**a[0])/(1.0 + (x/a[3])**(a[2]-a[0]))+a[4])
# a = low-frequency index, MANUALLY SET TO -1
# b = log(normalization)
# c = high-frequency index, usually between 1 and 4
# d = log(frequency where model bends)
# e = log(white noise level)
def cbpl(freq, c, b, d, e):
### compute bending factor
logz = (1.0 - c)*(np.log(freq) - np.log(d))
logqsum = sum(np.where(logz<-16, 1.0, 0.0))
if logqsum > 0.0:
logq = np.where(logz<-16, 1.0, logz)
else:
logq = logz
logqsum = np.sum(np.where((-16<=logz) & (logz<=16.0), np.log(1.0 + np.exp(logz)), 0.0))
if logqsum > 0.0:
logqnew = np.where((-16<=logz) & (logz<=16.0), np.log(1.0 + np.exp(logz)), logq)
else:
logqnew = logq
y = np.exp(-1.0*np.log(freq) - logqnew + b) + np.exp(e)
return y
#### COMBINE FUNCTIONS INTO A NEW FUNCTION
#
# This function will return a newly created function,
# combining all functions giving in *funcs
#
# *funcs should be tuples of (function name, no. of parameters)
# where the number of parameters is that of the function minus the
# x-coordinate ('freq').
#
#
# **kwargs should only really have one keyword:
# mode = 'add'
# which defines whether the model components will be added ('add')
# or multiplied ('multiply').
# By default, if nothing is given, components will be added
#
#
# NOTE: When calling combmod, make sure you put in the RIGHT NUMBER OF
# PARAMETERS and IN THE RIGHT ORDER!
#
#
# Example:
# - make a combined power law and QPO model, multiplying components together.
# The power law includes white noise (3 parameters, otherwise two), the
# QPO model doesn't (3 parameters, otherwise 4):
# >>> combmod = combine_models((pl, 3), (qpo, 3), mode='multiply')
#
#
def combine_models(*funcs, **kwargs):
### assert that keyword 'mode' is given in function call
try:
        assert 'mode' in kwargs
### if that's not true, catch Assertion error and manually set mode = 'add'
except AssertionError:
kwargs["mode"] = 'add'
### tell the user what mode the code is using, exit if mode not recognized
if kwargs["mode"] == 'add':
print("Model components will be added.")
elif kwargs['mode'] == 'multiply':
print('Model components will be multiplied.')
else:
raise Exception("Operation on model components not recognized.")
### this is the combined function returned by combined_models
### 'freq': x-coordinate of the model
### '*args': model parameters
def combmod(freq, *args):
### create empty list for result of the model
res = np.zeros(len(freq))
### initialize the parameter count to make sure the right parameters
### go into the right function
parcount = 0
### for each function, compute f(x) and add or multiply the result with the previous iteration
for i,x in enumerate(funcs):
funcargs = args[parcount:parcount+int(x[1])]
if kwargs['mode'] == 'add':
res = res + x[0](freq, *funcargs)
elif kwargs['mode'] == 'multiply':
res = res * x[0](freq, *funcargs)
parcount = parcount + x[1]
return res
return combmod
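# Usage sketch (illustrative only; parameter values are arbitrary): build an
# additive power law + QPO model and evaluate it on a frequency grid. The
# parameters are passed in the same order as the components were given to
# combine_models: here 3 power-law parameters (index, log-normalization,
# log-noise level) followed by 3 QPO parameters (log-width, log-normalization,
# centroid frequency).
#
#   combmod = combine_models((pl, 3), (qpo, 3), mode='add')
#   freq = np.linspace(0.01, 100.0, 10000)
#   model = combmod(freq, 2.0, 1.0, -3.0, np.log(0.5), np.log(10.0), 40.0)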
## MAXIMUM LIKELIHOOD FUNCTION
# Not sure I need this!
def maxlike(freq, power, func, pars):
funcval = func(freq, *pars)
res = np.sum(np.log(funcval))+ np.sum(power/funcval)
if res == inf or np.isnan(res):
res = 0.0
return res
#### ANALYTIC EXPRESSION FOR THE HESSIAN FOR THE POWER LAW FITTING
#
# This function computes the Hessian (second derivative tensor) of the
# maximum likelihood function of the power law model.
# I can use this to cross-check results from numerical
# estimation of the covariance matrix (=inverse of the hessian).
#
# This function returns the Hessian matrix.
#
#
def pl_covariance(freq, power, pars):
### power law index
b = pars[0]
### normalization
a = np.exp(pars[1])
### noise level
c = np.exp(pars[2])
exppos = freq**b
expneg = freq**(-b)
exptwo = freq**(2*b)
### denominator appearing often:
denoma = a*exppos + c
denomc = a + c*exppos
funcval = a*expneg + c
logfreq = np.log(freq)
### d^2L/da^2
datwo = np.sum((2.0*power*exptwo/denoma**2.0) - 1.0/(denomc**2.0))
### d^2L/db^2
dbout = a*logfreq**2.0
dbfirst = power*(a*exptwo - c*exppos)/denoma**3.0
dbsecond = c*exppos/denomc**2.0
dbtwo = np.sum(dbout*(dbfirst + dbsecond))
### d^2L/dc^2
dctwo = np.sum((2.0*power/denoma**3.0) - 1.0/funcval**2.0)
### d^2L/dadb
dadbfirst = power*exppos/denoma
dadbcurly = (2.0*power*exptwo/denoma**3.0) + 1.0/denomc**2.0
dadblast = 1.0/denomc
dadb = np.sum(logfreq*(-dadbfirst + a*dadbcurly - dadblast))
### d^2L/dadc
dadc = np.sum(exppos*((2.0*power/denoma**3.0) - 1.0/denomc**2.0))
### d^2L/dbdc
dbdcfactor = a*expneg*logfreq
dbdcrest = (2.0*power*exptwo/denoma**3.0) + 1.0/funcval**2.0
dbdc = np.sum(dbdcfactor*dbdcrest)
### assemble Hessian
hess = [[datwo, dadb, dadc],[dadb, dbtwo, dbdc],[dadb, dbdc, dctwo]]
return hess
#### CLASS THAT FITS POWER SPECTRA USING MLE ##################
#
# This class provides functionality for maximum likelihood fitting
# of periodogram data to a set of models defined above.
#
# It draws heavily on the various optimization routines provided in
# scipy.optimize, and additionally has the option to use R functionality
# via rPy and a given set of functions defined in an R-script.
#
# Note that many different optimization routines are available, and not all
# may be appropriate for a given problem.
# Constrained optimization is available via the constrained BFGS and TNC routines.
#
#
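# Minimal usage sketch (illustrative only): assumes a periodogram with
# attributes freq and ps (e.g. from the powerspectrum module) and a
# log-posterior callable lpost built elsewhere (e.g. with the posterior
# module); the starting parameters below are arbitrary.
#
#   fitter = MaxLikelihood(ps.freq, ps.ps, obs=True, fitmethod='bfgs')
#   fitres = fitter.mlest(lpost, [2.0, 5.0, -3.0], functype='posterior')
#   print(fitres['popt'], fitres['deviance'])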
class MaxLikelihood(object):
### x = x-coordinate of data
### y = y-coordinate of data
### obs= if True, compute covariances and print summary to screen
###
### fitmethod = choose optimization method
### options are:
### 'simplex': use simplex downhill algorithm
### 'powell': use modified Powell's algorithm
### 'gradient': use nonlinear conjugate gradient
### 'bfgs': use BFGS algorithm
### 'newton': use Newton CG
### 'leastsq' : use least-squares method
### 'constbfgs': constrained BFGS algorithm
### 'tnc': constrained optimization via a truncated Newton algorithm
### 'nlm': optimization via R's non-linear minimization routine
### 'anneal': simulated annealing for convex problems
def __init__(self, x, y, obs=True, fitmethod='powell'):
### save power spectrum in attributes
self.x= x
self.y= y
### Is this a real observation or a fake periodogram to be fitted?
self.obs = obs
self.nlmflag = False
MaxLikelihood._set_fitmethod(self, fitmethod)
def _set_fitmethod(self, fitmethod):
### select fitting method
if fitmethod.lower() in ['simplex']:
self.fitmethod = scipy.optimize.fmin
elif fitmethod.lower() in ['powell']:
self.fitmethod = scipy.optimize.fmin_powell
elif fitmethod.lower() in ['gradient']:
self.fitmethod = scipy.optimize.fmin_cg
elif fitmethod.lower() in ['bfgs']:
self.fitmethod = scipy.optimize.fmin_bfgs
### this one breaks because I can't figure out the syntax for fprime
elif fitmethod.lower() in ['newton']:
self.fitmethod = scipy.optimize.fmin_ncg
elif fitmethod.lower() in ['leastsq']:
self.fitmethod = scipy.optimize.leastsq
elif fitmethod.lower() in ['constbfgs']:
self.fitmethod = scipy.optimize.fmin_l_bfgs_b
elif fitmethod.lower() in ['tnc']:
self.fitmethod = scipy.optimize.fmin_tnc
else:
print("Minimization method not recognized. Using standard (Powell's) method.")
self.fitmethod = scipy.optimize.fmin_powell
if ndim(self.y) ==1:
### smooth data by three different factors
self.smooth3 = scipy.signal.wiener(self.y, 3)
self.smooth5 = scipy.signal.wiener(self.y, 5)
self.smooth11 = scipy.signal.wiener(self.y, 11)
### Do a maximum likelihood fitting with function func and
### initial parameters ain
### if residuals are to be fit, put a list of residuals into keyword 'residuals'
### func = function to be fitted
### ain = list with set of initial parameters
### bounds = bounds on parameter ranges (for constrained optimization
### obs = if True, compute covariance and print summary to screen
### noise = if True, the last parameter in ain is noise and will be renormalized
### residuals = put list of residuals here if they should be fit rather than self.y
def mlest(self, func, ain, bounds = None, obs=True, neg=True, functype='posterior'):
### extract frequency and powers from periodogram
#freq = np.array(self.x)
#if not residuals is None:
# power = residuals
#else:
# power = np.array(self.y)
#lenpower = float(len(self.y))
## renormalize normalization so it's in the right range
#varobs = np.sum(power)
#varmod = np.sum(func(freq, *ain))
#renorm = varobs/varmod
#if len(ain) > 1:
# ain[1] = ain[1] + np.log(renorm)
### If last parameter is noise level, renormalize noise level
### to something useful:
#if not noise is None:
# ### take the last 50 elements of the power spectrum
# noisepower = power[-50:]
# meannoise = np.log(np.mean(noisepower))
# if func == pl_qpo:
# ain[2] = meannoise
# if func == bpl_qpo:
# ain[4] = meannoise
# else:
# ain[noise] = meannoise
### definition of the likelihood function
#def maxlike(pars):
# funcval = func(freq, *pars)
# res = np.sum(np.log(funcval))+ np.sum(power/funcval)
# return res
#res = maxlike(ain)
        fitparams = self._fitting(func, ain, bounds, obs=obs)
#fitparams["model"] = str(func).split()[1]
#fitparams["mfit"] = func(self.x, *fitparams['popt'])
### calculate model power spectrum from optimal parameters
#fitparams['mfit'] = func(self.x, *fitparams['popt'])
### figure-of-merit (SSE)
#fitparams['merit'] = np.sum(((self.y-fitparams['mfit'])/fitparams['mfit'])**2.0)
### find highest outlier
#plrat = 2.0*(self.y/fitparams['mfit'])
#fitparams['sobs'] = np.sum(plrat)
#if nmax ==1:
### plmaxpow is the maximum of 2*data/model
# plmaxpow = max(plrat[1:])
#print('plmaxpow: ' + str(plmaxpow))
# plmaxind = np.where(plrat == plmaxpow)[0][0]
#print('plmaxind: ' + str(plmaxind))
# plmaxfreq = self.x[plmaxind]
#else:
# plratsort = plrat.sort()
# plmaxpow = plrat[-nmax:]
# plmaxind, plmaxfreq = [], []
# for p in plmaxpow:
# plmaxind_temp = np.where(plrat == p)[0][0]
# plmaxind.append(plmaxind_temp)
# plmaxfreq.append(self.x[plmaxind_temp])
#fitparams['maxpow'] = plmaxpow
#fitparams['maxind'] = plmaxind
#fitparams['maxfreq'] = plmaxfreq
## do a KS test comparing residuals to the exponential distribution
#plks = scipy.stats.kstest(plrat/2.0, 'expon', N=len(plrat))
#fitparams['ksp'] = plks[1]
#print("The figure-of-merit function for this model is: " + str(fitparams['merit']) + " and the fit for " + str(fitparams['dof']) + " dof is " + str(fitparams['merit']/fitparams['dof']) + ".")
if functype in ['p', 'post', 'posterior']:
fitparams['deviance'] = 2.0*func.loglikelihood(fitparams['popt'], neg=True)
elif functype in ['l', 'like', 'likelihood']:
fitparams['deviance'] = -2.0*func(fitparams['popt'])
print("Fitting statistics: ")
print(" -- number of frequencies: " + str(len(self.x)))
print(" -- Deviance [-2 log L] D = " + str(fitparams['deviance']))
#print(" -- Highest data/model outlier(s) 2I/S = " + str(fitparams['maxpow']))
#print(" at frequency(ies) f_max = " + str(fitparams['maxfreq']))
#print(" -- Summed Residuals S = " + str(fitparams['sobs']))
#print(" -- Expected S ~ " + str(fitparams['sexp']) + " +- " + str(fitparams['ssd']))
#print(" -- KS test p-value (use with caution!) p = " + str(fitparams['ksp']))
#print(" -- merit function (SSE) M = " + str(fitparams['merit']))
return fitparams
### Fitting Routine
### optfunc: function to be minimized
### ain: initial parameter set
### bounds: bounds for constrained optimization
### optfuncprime: analytic derivative of optfunc (if required)
### neg: bool keyword for MAP estimation (if done):
### if True: compute the negative of the posterior
def _fitting(self, optfunc, ain, bounds, optfuncprime=None, neg = True, obs=True):
#print("optfunc in _fitting:" + str(optfunc))
lenpower = float(len(self.y))
if neg == True:
if scipy.__version__ < "0.10.0":
args = [neg]
else:
args = (neg,)
else:
args = ()
#print("args: " + str(args))
#print("args: " + str(args))
### different commands for different fitting methods,
### at least until scipy 0.11 is out
funcval = 100.0
while funcval == 100 or funcval == 200 or funcval == 0.0 or funcval == np.inf or funcval == -np.inf:
## constrained minimization with truncated newton or constrained bfgs
if self.fitmethod == scipy.optimize.fmin_tnc or self.fitmethod == scipy.optimize.fmin_l_bfgs_b:
if bounds is None:
bounds = [[None, None] for x in range(len(ain))]
#print("No bounds given. Using no bounds.")
aopt = self.fitmethod(optfunc, ain, disp=0, args=args, bounds = bounds, approx_grad=True, maxfun=1000)
## Newton conjugate gradient, which doesn't work
elif self.fitmethod == scipy.optimize.fmin_ncg:
aopt = self.fitmethod(optfunc, ain, optfuncprime, disp=0,args=args)
## use R's non-linear minimization
elif self.nlmflag == True:
if func == pl:
mod = [0,1]
elif func == bpl:
mod = [1,1]
elif func == pl_qpo:
mod = [5,1]
elif func == bpl_qpo:
mod = [6,1]
elif func.__name__ == 'lorentz':
mod = [7,1]
aopt = self.fitmethod(robjects.r['lpost'], p=robjects.FloatVector(ain), x=robjects.FloatVector(self.x), y=robjects.FloatVector(power), hessian=True, mod=robjects.IntVector(mod))
### BFGS algorithm
elif self.fitmethod == scipy.optimize.fmin_bfgs:
aopt = self.fitmethod(optfunc, ain, disp=0,full_output=True, args=args)
warnflag = aopt[6]
if warnflag == 1 :
print("*** ACHTUNG! Maximum number of iterations exceeded! ***")
elif warnflag == 2:
print("Gradient and/or function calls not changing!")
## all other methods: Simplex, Powell, Gradient
else:
aopt = self.fitmethod(optfunc, ain, disp=0,full_output = True, args=args)
funcval = aopt[1]
ain = np.array(ain)*((np.random.rand(len(ain))-0.5)*4.0)
### make a dictionary with best-fit parameters:
## popt: best fit parameters (list)
## result: value of ML function at minimum
## model: the model used
if self.nlmflag:
fitparams = {'popt':np.array(aopt[1]), 'result':aopt[0]}
else:
fitparams = {'popt':aopt[0], 'result':aopt[1]}
### calculate model power spectrum from optimal parameters
#fitparams['mfit'] = func(self.x, *fitparams['popt'])
### degrees of freedom
fitparams['dof'] = lenpower - float(len(fitparams['popt']))
### Akaike Information Criterion
fitparams['aic'] = fitparams['result']+2.0*len(ain)
### Bayesian Information Criterion
        fitparams['bic'] = fitparams['result'] + len(ain)*np.log(len(self.x))
### compute deviance
try:
fitparams['deviance'] = 2.0*optfunc.loglikelihood(fitparams['popt'])
except AttributeError:
fitparams['deviance'] = 2.0*optfunc(fitparams['popt'])
fitparams['sexp'] = 2.0*len(self.x)*len(fitparams['popt'])
fitparams['ssd'] = np.sqrt(2.0*fitparams['sexp'])
### smooth data by three different factors
if ndim(self.y) == 1:
fitparams['smooth3'] = scipy.signal.wiener(self.y, 3)
fitparams['smooth5'] = scipy.signal.wiener(self.y, 5)
fitparams['smooth11'] = scipy.signal.wiener(self.y, 11)
### if this is an observation (not fake data), compute the covariance matrix
if obs == True:
### for BFGS, get covariance from algorithm output
if self.fitmethod == scipy.optimize.fmin_bfgs:
print("Approximating covariance from BFGS: ")
covar = aopt[3]
### for NLM, get covariance from R output
elif self.nlmflag:
print("Getting Hessian from R routine: ")
phess = aopt[3]
covar = robjects.r.solve(phess)
covar = np.array(covar)
else:
#print("neg: " + str(args))
### calculate Hessian approximating with finite differences
print("Approximating Hessian with finite differences ...")
phess = approx_hess(aopt[0], optfunc, neg=args)
                covar = np.linalg.pinv(phess)
                #phess2 = pl_covariance(self.x, self.y, fitparams['popt'])
                ### covariance is the (pseudo-)inverse of the Hessian
                print("Hessian (empirical): " + str(phess))
                print("Covariance (empirical): " + str(covar))
fitparams['cov'] = covar
### errors of parameters are on the diagonal of the covariance
### matrix; take square root to get standard deviation
stderr = np.sqrt(np.diag(covar))
fitparams['err'] = stderr
### Print results to screen
print("The best-fit model parameters plus errors are:")
for i,(x,y) in enumerate(zip(fitparams['popt'], stderr)):
print("Parameter " + str(i) + ": " + str(x) + " +/- " + str(y))
print("The Akaike Information Criterion of the power law model is: "+ str(fitparams['aic']) + ".")
return fitparams
#### This function computes the Likelihood Ratio Test between two nested models
###
### mod1: model 1 (simpler model)
### ain1: list of input parameters for model 1
### mod2: model 2 (more complex model)
### ain2: list of input parameters for model 2
### bounds1: bounds for model 1 (constrained optimization)
### bounds2: bounds for model 2 (constrained optimization)
def compute_lrt(self, mod1, ain1, mod2, ain2, bounds1=None, bounds2=None, noise1 = -1, noise2 = -1):
### fit data with both models
        par1 = self.mlest(mod1, ain1, bounds=bounds1, obs=self.obs)
        par2 = self.mlest(mod2, ain2, bounds=bounds2, obs=self.obs)
### extract dictionaries with parameters for each
varname1 = str(mod1).split()[1] + 'fit'
varname2 = str(mod2).split()[1] + 'fit'
self.__setattr__(varname1, par1)
self.__setattr__(varname2, par2)
### compute log likelihood ratio as difference between the deviances
self.lrt = par1['deviance'] - par2['deviance']
if self.obs == True:
print("The Likelihood Ratio for models " + str(mod1).split()[1] + " and " + str(mod2).split()[1] + " is: LRT = " + str(self.lrt))
return self.lrt
### auxiliary function that makes a Lorentzian with a fixed centroid frequency
### needed for QPO search algorithm
def __make_lorentzians(self,x):
### loop creates many function definitions lorentz, each differs only by the value
### of the centroid frequency f used in computing the spectrum
for f in x:
def create_my_func(f):
def lorentz(x, a, b, e):
result = qpo(x, a, b, f, e)
return result
return lorentz
yield(create_my_func(f))
#### Fit Lorentzians at each frequency in the spectrum
#### and return a list of log-likelihoods at each value
### fitpars = parameters of broadband noise fit
### residuals: if true: divide data by best-fit model in fitpars
def fitqpo(self, fitpars=None, residuals=False):
if residuals:
### extract model fit
mfit = fitpars['mfit']
### compute residuals: data/model
residuals = np.array(fitpars["smooth5"])/mfit
else:
residuals = np.array(fitpars["smooth5"])
### constraint on width of QPO: must be bigger than 2*frequency resolution
gamma_min = 2.0*(self.x[2]-self.x[1])
### empty list for log-likelihoods
like_rat = []
### fit a Lorentzian at every frequency
for f, func, res in zip(self.x[3:-3], self.__make_lorentzians(self.x[3:-3]), residuals[3:-3]):
### constraint on width of QPO: must be narrower than the centroid frequency/2
gamma_max = f/2.0
norm = np.mean(residuals)+np.var(residuals)
ain = [gamma_min, norm, 0.0]
### fit QPO to data
#pars = self.mlest(func, ain, bounds=[[gamma_min, gamma_max], [None, None], [None, None]], noise = True, obs=False, residuals=None)
pars = self.mlest(func, ain, noise = -1, obs=False, residuals=residuals)
### save fitted frequency and data residuals in parameter dictionary
pars['fitfreq'] = f
pars['residuals'] = residuals
like_rat.append(pars)
### returns a list of parameter dictionaries
return like_rat
#### Find QPOs in Periodogram data
### func = broadband noise model
### ain = input parameters for broadband noise model
### fitmethod = which method to use for fitting the QPOs
### plot = if True, save a plot with log-likelihoods
### plotname = string used in filename if plot == True
### obs = if True, compute covariances and print out stuff
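    ### Usage sketch (illustrative only; `maxlike` is an instance of this
    ### class and `pl` with its start values is a placeholder model):
    ###     lrt, noisepars, qpopars = maxlike.find_qpo(pl, [2., 4., 0.5],
    ###                                                plot=False, obs=True)
    ### `lrt` compares the noise-only fit with the noise-plus-QPO fit.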
def find_qpo(self, func, ain,
bounds=None,
fitmethod='nlm',
plot=False,
plotname=None,
obs = False):
### fit broadband noise model to the data
optpars = self.mlest(func, ain, obs=obs, noise=-1)
### fit a variable Lorentzian to every frequency and return parameter values
lrts = self.fitqpo(fitpars=optpars, residuals=True)
### list of likelihood ratios
like_rat = np.array([x['deviance'] for x in lrts])
### find minimum likelihood ratio
minind = np.where(like_rat == min(like_rat))
minind = minind[0][0]+3
#print(minind)
minfreq = self.x[minind] ### ... maybe not! Needs to be +1 because first frequency left out in psfit
print("The frequency of the tentative QPO is: " + str(minfreq))
residuals = self.smooth5/optpars['mfit']
best_lorentz = self.__make_lorentzians([minfreq])
noiseind = len(optpars['popt']) - 1
# for z in self.__make_lorentzians([minfreq]):
# qpofit_res = self.mlest(z, lrts[minind+1]['popt'], obs=False, noise=-1, residuals=residuals)
# print("qpofit_res: " + str(qpofit_res['popt']))
### minimum width of QPO
gamma_min = np.log((self.x[1]-self.x[0])*3.0)
### maximum width of QPO
gamma_max = minfreq/1.5
print('combmod first component: ' + str(func))
### create a combined model of broadband noise model + QPO
combmod = combine_models((func, len(optpars['popt'])), (qpo, 3), mode='add')
### make a list of input parameters
inpars = list(optpars['popt'].copy())
inpars.extend(lrts[minind-3]['popt'][:2])
inpars.extend([minfreq])
qpobounds = [[None, None] for x in range(len(inpars)-3)]
qpobounds.extend([[gamma_min, gamma_max], [None, None], [None,None]])
### fit broadband QPO + noise model, using best-fit parameters as input
qpopars = self.mlest(combmod, inpars, bounds=qpobounds, obs=obs, noise=noiseind, smooth=0)
### likelihood ratio of func+QPO to func
lrt = optpars['deviance'] - qpopars['deviance']
like_rat_norm = like_rat/np.mean(like_rat)*np.mean(self.y)*100.0
if plot:
plt.figure()
axL = plt.subplot(1,1,1)
plt.plot(self.x, self.y, lw=3, c='navy')
plt.plot(self.x, qpopars['mfit'], lw=3, c='MediumOrchid')
plt.xscale("log")
plt.yscale("log")
plt.xlabel('Frequency')
plt.ylabel('variance normalized power')
#yticks_left, ylabels_left = plt.yticks()
#nr_yticks_left = len(yticks_left)
axR = plt.twinx()
#axR = plt.subplot(1,1,1, sharex=axL, frameon=False)
axR.yaxis.tick_right()
axR.yaxis.set_label_position("right")
plt.plot(self.x[3:-3], like_rat, 'r--', lw=2, c="DeepSkyBlue")
plt.ylabel("-2*log-likelihood")
#yticks_right, ylabels_right = plt.yticks()
#tickmin, tickmax = yticks_right[0], yticks_right[-1]
#tickloc_yleft = np.linspace(tickmin, tickmax, num=nr_yticks_left)
#axR.yaxis.set_ticks(tickloc_yleft)
#axR.yaxis.set_ticklabels(["%.2f" % val for val in tickloc_yleft])
#plt.axis([min(self.x), max(self.x), min(self.y), max(self.y)])
plt.axis([min(self.x), max(self.x), min(like_rat)-np.var(like_rat), max(like_rat)+np.var(like_rat)])
plt.savefig(plotname+'.png', format='png')
plt.close()
return lrt, optpars, qpopars
### plot two fits of broadband models against each other
def plotfits(self, par1, par2 = None, namestr='test', log=False):
### make a figure
f = plt.figure(figsize=(12,10))
### adjust subplots such that the space between the top and bottom of each are zero
plt.subplots_adjust(hspace=0.0, wspace=0.4)
### first subplot of the grid, twice as high as the other two
### This is the periodogram with the two fitted models overplotted
s1 = plt.subplot2grid((4,1),(0,0),rowspan=2)
if log:
logx = np.log10(self.x)
logy = np.log10(self.y)
logpar1 = np.log10(par1['mfit'])
logpar1s5 = np.log10(par1['smooth5'])
p1, = plt.plot(logx, logy, color='black', linestyle='steps-mid')
p1smooth = plt.plot(logx, logpar1s5, lw=3, color='orange')
p2, = plt.plot(logx, logpar1, color='blue', lw=2)
else:
p1, = plt.plot(self.x, self.y, color='black', linestyle='steps-mid')
p1smooth = plt.plot(self.x, par1['smooth5'], lw=3, color='orange')
p2, = plt.plot(self.x, par1['mfit'], color='blue', lw=2)
if par2:
if log:
logpar2 = np.log10(par2['mfit'])
p3, = plt.plot(logx, logpar2, color='red', lw=2)
else:
p3, = plt.plot(self.x, par2['mfit'], color='red', lw=2)
plt.legend([p1, p2, p3], ["observed periodogram", par1['model'] + " fit", par2['model'] + " fit"])
else:
plt.legend([p1, p2], ["observed periodogram", par1['model'] + " fit"])
if log:
plt.axis([min(logx), max(logx), min(logy)-1.0, max(logy)+1])
plt.ylabel('log(Leahy-Normalized Power)', fontsize=18)
else:
plt.xscale("log")
plt.yscale("log")
plt.axis([min(self.x), max(self.x), min(self.y)/10.0, max(self.y)*10.0])
plt.ylabel('Leahy-Normalized Power', fontsize=18)
plt.title("Periodogram and fits for burst " + namestr, fontsize=18)
### second subplot: power/model for Power law and straight line
s2 = plt.subplot2grid((4,1),(2,0),rowspan=1)
pldif = self.y/par1['mfit']
if par2:
bpldif = self.y/par2['mfit']
if log:
plt.plot(logx, pldif, color='black', linestyle='steps-mid')
plt.plot(logx, np.ones(len(self.x)), color='blue', lw=2)
else:
plt.plot(self.x, pldif, color='black', linestyle='steps-mid')
plt.plot(self.x, np.ones(len(self.x)), color='blue', lw=2)
plt.ylabel("Residuals, \n" + par1['model'] + " model", fontsize=18)
if log:
plt.axis([min(logx), max(logx), min(pldif), max(pldif)])
else:
plt.xscale("log")
plt.yscale("log")
plt.axis([min(self.x), max(self.x), min(pldif), max(pldif)])
if par2:
bpldif = self.y/par2['mfit']
### third subplot: power/model for bent power law and straight line
s3 = plt.subplot2grid((4,1),(3,0),rowspan=1)
if log:
plt.plot(logx, bpldif, color='black', linestyle='steps-mid')
plt.plot(logx, np.ones(len(self.x)), color='red', lw=2)
plt.axis([min(logx), max(logx), min(bpldif), max(bpldif)])
else:
plt.plot(self.x, bpldif, color='black', linestyle='steps-mid')
plt.plot(self.x, np.ones(len(self.x)), color='red', lw=2)
plt.xscale("log")
plt.yscale("log")
plt.axis([min(self.x), max(self.x), min(bpldif), max(bpldif)])
plt.ylabel("Residuals, \n" + par2['model'] + " model", fontsize=18)
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(14)
if log:
plt.xlabel("log(Frequency) [Hz]", fontsize=18)
else:
plt.xlabel("Frequency [Hz]", fontsize=18)
### make sure xticks are taken from first plots, but don't appear there
plt.setp(s1.get_xticklabels(), visible=False)
### save figure in png file and close plot device
plt.savefig(namestr + '_ps_fit.png', format='png')
plt.close()
return
##########################################################
##########################################################
##########################################################
#### PERIODOGRAM FITTING SUBCLASS ################
#
# Compute Maximum A Posteriori (MAP) parameters
# for periodograms via Maximum Likelihood
# using the
# posterior class above
#
#
#
#
#
#
#
#
class PerMaxLike(MaxLikelihood):
### ps = PowerSpectrum object with periodogram
### obs= if True, compute covariances and print summary to screen
###
### fitmethod = choose optimization method
### options are:
### 'simplex': use simplex downhill algorithm
### 'powell': use modified Powell's algorithm
### 'gradient': use nonlinear conjugate gradient
### 'bfgs': use BFGS algorithm
### 'newton': use Newton CG
### 'leastsq' : use least-squares method
### 'constbfgs': constrained BFGS algorithm
### 'tnc': constrained optimization via a truncated Newton algorithm
### 'nlm': optimization via R's non-linear minimization routine
### 'anneal': simulated annealing for convex problems
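    ### Usage sketch (assumes `ps` is a powerspectrum.PowerSpectrum instance;
    ### the model function `pl` and its start values are placeholders):
    ###     fitter = PerMaxLike(ps, obs=True, fitmethod='powell')
    ###     plfit = fitter.mlest(pl, ain=[2., 4., 0.5], obs=True)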
def __init__(self, ps, obs=True, fitmethod='powell'):
### ignore first elements in ps.freq and ps.ps (= no. of photons)
#ps.freq = np.array(ps.freq[1:])
self.x = ps.freq[1:]
#ps.ps = np.array(ps.ps[1:])
self.y = ps.ps[1:]
self.ps = ps
### Is this a real observation or a fake periodogram to be fitted?
self.obs = obs
self.nlmflag = False
### set fitmethod
self._set_fitmethod(fitmethod)
def mlest(self, func, ain, bounds = None, obs=True, noise=None, nmax=1, residuals = None, smooth=0, m=1, map=True):
if smooth == 0 :
power = self.y
elif smooth == 3:
power = self.smooth3
elif smooth == 5:
power = self.smooth5
elif smooth == 11:
power = self.smooth11
else:
raise Exception('No valid option for kwarg "smooth". Options are 0,3,5 and 11!')
if not residuals is None:
power = residuals
lenpower = float(len(power))
### renormalize normalization so it's in the right range
varobs = np.sum(power)
varmod = np.sum(func(self.x, *ain))
renorm = varobs/varmod
if len(ain) > 1:
ain[1] = ain[1] + np.log(renorm)
#print('noise index: ' + str(noise))
### If last parameter is noise level, renormalize noise level
### to something useful:
if not noise is None:
#print("Renormalizing noise level ...")
### take the last 50 elements of the power spectrum
noisepower = power[-51:-1]
meannoise = np.log(np.mean(noisepower))
ain[noise] = meannoise
### set function to be minimized: posterior density for periodograms:
pstemp = powerspectrum.PowerSpectrum()
pstemp.freq = self.x
pstemp.ps = power
pstemp.df = self.ps.df
if m == 1:
#print("I am here")
lposterior = posterior.PerPosterior(pstemp, func)
elif m > 1:
lposterior = posterior.StackPerPosterior(pstemp, func, m)
else:
raise Exception("Number of power spectra is not a valid number!")
#print("ain: " + str(ain))
#print("lposterior(initial value): " + str(lposterior(ain)))
if not map:
lpost = lposterior.loglikelihood
else:
lpost = lposterior
# lpost = posterior.PerPosterior(pstemp, func)
lpostain = lpost(ain)
#print(lpostain)
# fitparams = self._fitting(lpost.loglikelihood, ain, bounds, neg = False, obs=obs)
fitparams = self._fitting(lpost, ain, bounds, neg = True, obs=obs)
#print("fitparams: " + str(fitparams["popt"]))
fitparams["model"] = str(func).split()[1]
fitparams["mfit"] = func(self.x, *fitparams['popt'])
#print("mfit: " + str(fitparams["mfit"]))
### calculate model power spectrum from optimal parameters
#fitparams['mfit'] = func(self.x, *fitparams['popt'])
### figure-of-merit (SSE)
fitparams['merit'] = np.sum(((power-fitparams['mfit'])/fitparams['mfit'])**2.0)
### find highest outlier
plrat = 2.0*(self.y/fitparams['mfit'])
#print(plrat)
fitparams['sobs'] = np.sum(plrat)
#### plmaxpow is the maximum of 2*data/model
#plmaxpow = max(plrat)
##print("plmaxpow: " + str(plmaxpow))
#try:
# plmaxind = np.where(plrat == plmaxpow)[0]
##print("plmaxind: " + str(plmaxind))
# plmaxfreq = self.x[plmaxind]
#except TypeError:
# plmaxfreq = None
if nmax ==1:
### plmaxpow is the maximum of 2*data/model
plmaxpow = max(plrat[1:])
#print('plmaxpow: ' + str(plmaxpow))
plmaxind = np.where(plrat == plmaxpow)[0]
#print('plmaxind: ' + str(plmaxind))
if len(plmaxind) > 1:
plmaxind = plmaxind[0]
elif len(plmaxind) == 0:
plmaxind = -2
plmaxfreq = self.x[plmaxind]
else:
plratsort = copy.copy(plrat)
plratsort.sort()
plmaxpow = plratsort[-nmax:]
plmaxind, plmaxfreq = [], []
for p in plmaxpow:
try:
plmaxind_temp = np.where(plrat == p)[0]
if len(plmaxind_temp) > 1:
plmaxind_temp = plmaxind_temp[0]
elif len(plmaxind_temp) == 0:
plmaxind_temp = -2
plmaxind.append(plmaxind_temp)
plmaxfreq.append(self.x[plmaxind_temp])
except TypeError:
plmaxind.append(None)
plmaxfreq.append(None)
fitparams['maxpow'] = plmaxpow
fitparams['maxind'] = plmaxind
fitparams['maxfreq'] = plmaxfreq
s3rat = 2.0*(fitparams['smooth3']/fitparams['mfit'])
fitparams['s3max'] = max(s3rat[1:])
try:
s3maxind = np.where(s3rat == fitparams['s3max'])[0]
if len(s3maxind) > 1:
s3maxind = s3maxind[0]
fitparams['s3maxfreq'] = self.x[s3maxind]
except TypeError:
fitparams["s3maxfreq"] = None
s5rat = 2.0*(fitparams['smooth5']/fitparams['mfit'])
fitparams['s5max'] = max(s5rat[1:])
try:
s5maxind = np.where(s5rat == fitparams['s5max'])[0]
if len(s5maxind) > 1:
s5maxind = s5maxind[0]
fitparams['s5maxfreq'] = self.x[s5maxind]
except TypeError:
fitparams['s5maxfreq'] = None
s11rat = 2.0*(fitparams['smooth11']/fitparams['mfit'])
fitparams['s11max'] = max(s11rat[1:])
try:
s11maxind = np.where(s11rat == fitparams['s11max'])[0]
if len(s11maxind) > 1:
s11maxind = s11maxind[0]
fitparams['s11maxfreq'] = self.x[s11maxind]
except TypeError:
fitparams['s11maxfreq'] = None
### compute binned periodograms and find highest outlier in those:
df = (self.x[1]-self.x[0])
### first, compute the maximum binning that would even make sense
bmax = int(self.x[-1]/(2.0*(self.x[1]-self.x[0])))
#print('bmax: ' + str(bmax))
bins = [1,3,5,7,10,15,20,30,50,70,100,200,300,500]
bindict = {}
for b in bins:
#print('bmax: ' + str(bmax))
#print('b: ' + str(b))
if b < bmax:
if b == 1:
binps = self.ps
else:
binps = self.ps.rebinps(b*df)
binpsname = "bin" + str(b)
# setattr(self, "bin" + str(b), binps)
bindict[binpsname] = binps
binpl = func(binps.freq, *fitparams["popt"])
binratio = 2.0*np.array(binps.ps)/binpl
#print("len(binratio): " + str(len(binratio)))
#print("mean(binratio): " + str(mean(binratio)))
maxind = np.where(binratio[1:] == max(binratio[1:]))[0]
if len(maxind) > 1:
maxind = maxind[0]
elif len(maxind) == 0 :
maxind = -2
#print('maxind: ' + str(maxind))
binmaxpow = "bmax" + str(b)
bindict[binmaxpow] = max(binratio[1:])
binmaxfreq = "bmaxfreq" + str(b)
#print("maxind[0]: " + str(maxind[0]+1))
bindict[binmaxfreq] = binps.freq[maxind+1]
#setattr(self, "bmax" + str(b), max(binratio[1:]))
#setattr(self, "b" + str(b) + "maxfreq", binps.freq[maxind+1])
bindict['binpl' + str(b)] = binpl
fitparams["bindict"] = bindict
## do a KS test comparing residuals to the exponential distribution
plks = scipy.stats.kstest(plrat/2.0, 'expon', N=len(plrat))
fitparams['ksp'] = plks[1]
if obs == True:
print("The figure-of-merit function for this model is: " + str(fitparams['merit']) + " and the fit for " + str(fitparams['dof']) + " dof is " + str(fitparams['merit']/fitparams['dof']) + ".")
print("Fitting statistics: ")
print(" -- number of frequencies: " + str(len(self.x)))
print(" -- Deviance [-2 log L] D = " + str(fitparams['deviance']))
print(" -- Highest data/model outlier 2I/S = " + str(fitparams['maxpow']))
print(" at frequency f_max = " + str(fitparams['maxfreq']))
print(" -- Highest smoothed data/model outlier for smoothing factor [3] 2I/S = " + str(fitparams['s3max']))
print(" at frequency f_max = " + str(fitparams['s3maxfreq']))
print(" -- Highest smoothed data/model outlier for smoothing factor [5] 2I/S = " + str(fitparams['s5max']))
print(" at frequency f_max = " + str(fitparams['s5maxfreq']))
print(" -- Highest smoothed data/model outlier for smoothing factor [11] 2I/S = " + str(fitparams['s11max']))
print(" at frequency f_max = " + str(fitparams['s11maxfreq']))
print(" -- Summed Residuals S = " + str(fitparams['sobs']))
print(" -- Expected S ~ " + str(fitparams['sexp']) + " +- " + str(fitparams['ssd']))
print(" -- KS test p-value (use with caution!) p = " + str(fitparams['ksp']))
print(" -- merit function (SSE) M = " + str(fitparams['merit']))
return fitparams
def compute_lrt(self, mod1, ain1, mod2, ain2, bounds1=None, bounds2=None, noise1=-1, noise2=-1, m=1, map=True, nmax=1):
### fit data with both models
par1 = self.mlest(mod1, ain1, bounds=bounds1, obs=self.obs, noise=noise1, m = m, map = map, nmax=nmax)
par2 = self.mlest(mod2, ain2, bounds=bounds2, obs=self.obs, noise=noise2, m = m, map = map, nmax=nmax)
### extract dictionaries with parameters for each
varname1 = str(mod1).split()[1] + 'fit'
varname2 = str(mod2).split()[1] + 'fit'
self.__setattr__(varname1, par1)
self.__setattr__(varname2, par2)
### compute log likelihood ratio as difference between the deviances
self.lrt = par1['deviance'] - par2['deviance']
if self.obs == True:
print("The Likelihood Ratio for models " + str(mod1).split()[1] + " and " + str(mod2).split()[1] + " is: LRT = " + str(self.lrt))
return self.lrt
#### MAXIMUM LIKELIHOOD FITTING FOR POISSON DATA
#
# This subclass implements maximum likelihood fitting
# using the modified Cash statistic as implemented in
# XSPEC
#
#
#
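# Usage sketch (illustrative only; `lc` stands for a light curve object with
# `time` and `counts` attributes, `burstmodel` for a count-rate model
# function, and the start values are placeholders):
#     lcfit = LightcurveMaxLike(lc, obs=True, fitmethod='bfgs')
#     pars = lcfit.mlest(burstmodel, ain=[10., 0.5, 2.])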
class LightcurveMaxLike(MaxLikelihood):
def __init__(self, lc, obs=True, fitmethod='bfgs'):
### ignore first elements in ps.freq and ps.ps (= no. of photons)
lc.time = np.array(lc.time)
self.x = lc.time
lc.counts = np.array(lc.counts)
self.y = lc.counts
self.lc = lc
### Is this a real observation or a fake periodogram to be fitted?
self.obs = obs
self.nlmflag = False
### set fitmethod
self._set_fitmethod(fitmethod)
def mlest(self, func, ain, bounds = None, obs=True, noise=None, residuals = None, nmax=1):
if not residuals is None:
counts = residuals
self.y= residuals
self.lc.counts = residuals
else:
counts = self.y
lencounts = float(len(counts))
### set function to be minimized: posterior density for periodograms:
lpost = posterior.LightcurvePosterior(self.lc, func)
lpostain = lpost(ain)
#print(lpostain)
# fitparams = self._fitting(lpost.loglikelihood, ain, bounds, neg = False, obs=obs)
fitparams = self._fitting(lpost, ain, bounds, neg = True, obs=obs)
#print("fitparams: " + str(fitparams["popt"]))
fitparams["model"] = str(func).split()[1]
fitparams["mfit"] = func(self.x, *fitparams['popt'])
#print("mfit: " + str(fitparams["mfit"]))
### calculate model power spectrum from optimal parameters
#fitparams['mfit'] = func(self.x, *fitparams['popt'])
### figure-of-merit (SSE)
fitparams['merit'] = np.sum(((self.y-fitparams['mfit'])/fitparams['mfit'])**2.0)
### find highest outlier
plrat = 2.0*(self.y/fitparams['mfit'])
fitparams['sobs'] = np.sum(plrat[1:])
## do a KS test comparing residuals to the exponential distribution
plks = scipy.stats.kstest(plrat/2.0, 'expon', N=len(plrat))
fitparams['ksp'] = plks[1]
if obs == True:
print("The figure-of-merit function for this model is: " + str(fitparams['merit']) + " and the fit for " + str(fitparams['dof']) + " dof is " + str(fitparams['merit']/fitparams['dof']) + ".")
print("Fitting statistics: ")
print(" -- Deviance [-2 log L] D = " + str(fitparams['deviance']))
print(" -- Summed Residuals S = " + str(fitparams['sobs']))
print(" -- Expected S ~ " + str(fitparams['sexp']) + " +- " + str(fitparams['ssd']))
print(" -- KS test p-value (use with caution!) p = " + str(fitparams['ksp']))
print(" -- merit function (SSE) M = " + str(fitparams['merit']))
return fitparams
########################################
########################################
class GaussMaxLike(MaxLikelihood):
def __init__(self, x, y, obs=True, fitmethod='bfgs'):
MaxLikelihood.__init__(self, x, y, obs, fitmethod)
def mlest(self, lpost, ain, func=None, bounds = None, obs=True):
lpostain = lpost(ain)
fitparams = self._fitting(lpost, ain, bounds, neg = True, obs=obs)
#model1 = str(covar).split()[1]
#fitparams['covariance function'] = model1
if func:
# model2 = str(func).split()[1]
# fitparams["function model"] = model2
fitparams["function fit"] = func(self.x, *fitparams['popt'][lpost.ncovar:])
#fitparams['merit'] = np.sum(((self.y-fitparams['mfit'])/fitparams['mfit'])**2.0)
if obs == True:
#print("The figure-of-merit function for this model is: " + str(fitparams['merit']) + " and the fit for " + str(fitparams['dof']) + " dof is " + str(fitparams['merit']/fitparams['dof']) + ".")
print("Fitting statistics: ")
print(" -- number of data points: " + str(len(self.x)))
print(" -- Deviance [-2 log L] D = " + str(fitparams['deviance']))
return fitparams
| bsd-2-clause |
Carralex/landlab | landlab/components/potentiality_flowrouting/examples/test_script_fr3.py | 6 | 8818 | # -*- coding: utf-8 -*-
"""test_script_fr3
A script of our potentiality "ghost field" flow routing method.
Created on Fri Feb 20 13:45:52 2015
@author: danhobley
"""
from __future__ import print_function
from six.moves import range
#from landlab import RasterModelGrid
#from landlab.plot.imshow import imshow_node_grid
import numpy as np
from pylab import imshow, show, contour, figure, clabel, quiver
from landlab.plot import imshow_grid_at_node
from landlab import RasterModelGrid
from matplotlib.ticker import MaxNLocator
sqrt = np.sqrt
nrows = 50
ncols = 50
#mg = RasterModelGrid(n, n, 1.)
#nt = 13000
nt = 15500
width = 1.
slope=0.1
core = (slice(1,-1),slice(1,-1))
dtwidth = 0.2
hR = np.zeros((nrows+2,ncols+2), dtype=float)
qwater_inR=np.zeros_like(hR) #WATER
qsed_inR=np.zeros_like(hR) #SED
#qwater_inR[core][0,-1]=np.pi/2.
qwater_inR[core][0,0]=1.
qwater_inR[core][0,-1]=0.5
qwater_inR[core][-1,24]=1.
#qsourceR[core][0,0]=.9*sqrt(2.)
#qsourceR[core][-1,n//2-1]=1
#qspR[core][0,-1]=np.pi/2.*(1.-slope)
#qsed_inR[core][0,0]=np.pi/2.*(1.-slope)
qsed_inR[core][0,0]=1.
qsed_inR[core][0,-1]=0.5
qsed_inR[core][-1,24]=1.
#qspR[core][0,0]=sqrt(2)
#qspR[core][-1,n//2-1]=1
flat_threshold = 0.00001
hgradEx = np.zeros_like(hR)
hgradWx = np.zeros_like(hR)
hgradNx = np.zeros_like(hR)
hgradSx = np.zeros_like(hR)
pgradEx = np.zeros_like(hR)
pgradWx = np.zeros_like(hR)
pgradNx = np.zeros_like(hR)
pgradSx = np.zeros_like(hR)
hgradEy = np.zeros_like(hR)
hgradWy = np.zeros_like(hR)
hgradNy = np.zeros_like(hR)
hgradSy = np.zeros_like(hR)
pgradEy = np.zeros_like(hR)
pgradWy = np.zeros_like(hR)
pgradNy = np.zeros_like(hR)
pgradSy = np.zeros_like(hR)
CslopeE = np.zeros_like(hR)
CslopeW = np.zeros_like(hR)
CslopeN = np.zeros_like(hR)
CslopeS = np.zeros_like(hR)
thetaE = np.zeros_like(hR)
thetaW = np.zeros_like(hR)
thetaN = np.zeros_like(hR)
thetaS = np.zeros_like(hR)
theta_vE = np.zeros_like(hR)
theta_vW = np.zeros_like(hR)
theta_vN = np.zeros_like(hR)
theta_vS = np.zeros_like(hR)
vmagE = np.zeros_like(hR)
vmagW = np.zeros_like(hR)
vmagN = np.zeros_like(hR)
vmagS = np.zeros_like(hR)
uE = np.zeros_like(hR)
uW = np.zeros_like(hR)
uN = np.zeros_like(hR)
uS = np.zeros_like(hR)
#coeffs for K solver:
aPP = np.zeros_like(hR)
aWW = np.zeros_like(hR)
aWP = np.zeros_like(hR)
aEE = np.zeros_like(hR)
aEP = np.zeros_like(hR)
aNN = np.zeros_like(hR)
aNP = np.zeros_like(hR)
aSS = np.zeros_like(hR)
aSP = np.zeros_like(hR)
qsedE = np.zeros_like(hR)
qsedW = np.zeros_like(hR)
qsedN = np.zeros_like(hR)
qsedS = np.zeros_like(hR)
K = np.zeros_like(hR)
not_flat = np.zeros((nrows,ncols), dtype=bool)
#Wchanged = np.zeros_like(hR, dtype=bool)
#Echanged = np.zeros_like(Wchanged)
#Nchanged = np.zeros_like(Wchanged)
#Schanged = np.zeros_like(Wchanged)
#uxval = np.zeros_like(hR)
#uyval = np.zeros_like(hR)
#set up slice offsets:
Es = (slice(1,-1),slice(2,ncols+2))
NEs = (slice(2,nrows+2),slice(2,ncols+2))
Ns = (slice(2,nrows+2),slice(1,-1))
NWs = (slice(2,nrows+2),slice(0,-2))
Ws = (slice(1,-1),slice(0,-2))
SWs = (slice(0,-2),slice(0,-2))
Ss = (slice(0,-2),slice(1,-1))
SEs = (slice(0,-2),slice(2,ncols+2))
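# Main explicit time loop: each step (i) evaluates surface gradients and
# slopes on the E/W/N/S faces of every core node, (ii) projects the
# potential-field velocities (uE, uW, uN, uS) onto the local downhill
# direction, (iii) moves sediment only where the face slope exceeds the
# threshold `slope` and updates the elevation field hR, and (iv) re-solves
# the water potential K with a fixed number of Jacobi-style sweeps before
# rebuilding the face velocities from K.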
for i in range(nt):
if i%100==0:
print(i)
qsedE.fill(0.)
qsedW.fill(0.)
qsedN.fill(0.)
qsedS.fill(0.)
hgradEx[core] = (hR[core]-hR[Es])#/width
hgradEy[core] = hR[SEs]-hR[NEs]+hR[Ss]-hR[Ns]
hgradEy[core] *= 0.25
CslopeE[core] = sqrt(np.square(hgradEx[core])+np.square(hgradEy[core]))
thetaE[core] = np.arctan(np.fabs(hgradEy[core])/(np.fabs(hgradEx[core])+1.e-10))
pgradEx[core] = uE[core] #pgrad is VV's vv, a velocity
pgradEy[core] = uN[core]+uS[core]+uN[Es]+uS[Es]
pgradEy[core] *= 0.25
vmagE[core] = sqrt(np.square(pgradEx[core])+np.square(pgradEy[core]))
#now resolve the effective flow magnitudes to downhill
theta_vE[core] = np.arctan(np.fabs(pgradEy[core])/(np.fabs(pgradEx[core])+1.e-10))
vmagE[core] *= np.cos(np.fabs(thetaE[core]-theta_vE[core]))
qsedE[core] = np.sign(hgradEx[core])*vmagE[core]*(CslopeE[core]-slope).clip(0.)*np.cos(thetaE[core])
#the clip should deal with the eastern edge, but return here to check if probs
hgradWx[core] = (hR[Ws]-hR[core])#/width
hgradWy[core] = hR[SWs]-hR[NWs]+hR[Ss]-hR[Ns]
hgradWy[core] *= 0.25
CslopeW[core] = sqrt(np.square(hgradWx[core])+np.square(hgradWy[core]))
thetaW[core] = np.arctan(np.fabs(hgradWy[core])/(np.fabs(hgradWx[core])+1.e-10))
pgradWx[core] = uW[core]#/width
pgradWy[core] = uN[core]+uS[core]+uN[Ws]+uS[Ws]
pgradWy[core] *= 0.25
vmagW[core] = sqrt(np.square(pgradWx[core])+np.square(pgradWy[core]))
theta_vW[core] = np.arctan(np.fabs(pgradWy[core])/(np.fabs(pgradWx[core])+1.e-10))
vmagW[core] *= np.cos(np.fabs(thetaW[core]-theta_vW[core]))
qsedW[core] = np.sign(hgradWx[core])*vmagW[core]*(CslopeW[core]-slope).clip(0.)*np.cos(thetaW[core])
hgradNx[core] = hR[NWs]-hR[NEs]+hR[Ws]-hR[Es]
hgradNx[core] *= 0.25
hgradNy[core] = (hR[core]-hR[Ns])#/width
CslopeN[core] = sqrt(np.square(hgradNx[core])+np.square(hgradNy[core]))
thetaN[core] = np.arctan(np.fabs(hgradNy[core])/(np.fabs(hgradNx[core])+1.e-10))
pgradNx[core] = uE[core]+uW[core]+uE[Ns]+uW[Ns]
pgradNx[core] *= 0.25
pgradNy[core] = uN[core]#/width
vmagN[core] = sqrt(np.square(pgradNx[core])+np.square(pgradNy[core]))
theta_vN[core] = np.arctan(np.fabs(pgradNy[core])/(np.fabs(pgradNx[core])+1.e-10))
vmagN[core] *= np.cos(np.fabs(thetaN[core]-theta_vN[core]))
qsedN[core] = np.sign(hgradNy[core])*vmagN[core]*(CslopeN[core]-slope).clip(0.)*np.sin(thetaN[core])
hgradSx[core] = hR[SWs]-hR[SEs]+hR[Ws]-hR[Es]
hgradSx[core] *= 0.25
hgradSy[core] = (hR[Ss]-hR[core])#/width
CslopeS[core] = sqrt(np.square(hgradSx[core])+np.square(hgradSy[core]))
thetaS[core] = np.arctan(np.fabs(hgradSy[core])/(np.fabs(hgradSx[core])+1.e-10))
pgradSx[core] = uE[core]+uW[core]+uE[Ss]+uW[Ss]
pgradSx[core] *= 0.25
pgradSy[core] = uS[core]#/width
vmagS[core] = sqrt(np.square(pgradSx[core])+np.square(pgradSy[core]))
theta_vS[core] = np.arctan(np.fabs(pgradSy[core])/(np.fabs(pgradSx[core])+1.e-10))
vmagS[core] *= np.cos(np.fabs(thetaS[core]-theta_vS[core]))
qsedS[core] = np.sign(hgradSy[core])*vmagS[core]*(CslopeS[core]-slope).clip(0.)*np.sin(thetaS[core])
hR[core] += dtwidth*(qsedS[core]+qsedW[core]-qsedN[core]-qsedE[core]+qsed_inR[core])
#update the dummy edges of our variables:
hR[0,1:-1] = hR[1,1:-1]
hR[-1,1:-1] = hR[-2,1:-1]
hR[1:-1,0] = hR[1:-1,1]
hR[1:-1,-1] = hR[1:-1,-2]
hR[(0,-1,0,-1),(0,-1,-1,0)] = hR[(1,-2,1,-2),(1,-2,-2,1)]
###P SOLVER
#not_flat = np.greater(hR[core],flat_threshold)
#not_mask = np.logical_not(mask)
aNN[core] = (-hR[core]+hR[Ns]).clip(0.)
aNP[core] = (hR[core]-hR[Ns]).clip(0.)
aSS[core] = (-hR[core]+hR[Ss]).clip(0.)
aSP[core] = (hR[core]-hR[Ss]).clip(0.)
aEE[core] = (-hR[core]+hR[Es]).clip(0.)
aEP[core] = (hR[core]-hR[Es]).clip(0.)
aWW[core] = (-hR[core]+hR[Ws]).clip(0.)
aWP[core] = (hR[core]-hR[Ws]).clip(0.)
aPP[core] = aWP[core]+aEP[core]+aSP[core]+aNP[core]+1.e-6
for j in range(15):
#assert np.all(np.greater(aPP[core][not_flat],0.)) #this is here to eliminate a divby0
#K[core][not_flat] = ((aWW[core]*K[Ws]+aEE[core]*K[Es]+aSS[core]*K[Ss]+aNN[core]*K[Ns]
# +qwater_inR[core])[not_flat])/aPP[core][not_flat]
K[core] = (aWW[core]*K[Ws]+aEE[core]*K[Es]+aSS[core]*K[Ss]+aNN[core]*K[Ns]
+qwater_inR[core])/aPP[core]
for BC in (K,):
BC[0,1:-1] = BC[1,1:-1]
BC[-1,1:-1] = BC[-2,1:-1]
BC[1:-1,0] = BC[1:-1,1]
BC[1:-1,-1] = BC[1:-1,-2]
BC[(0,-1,0,-1),(0,-1,-1,0)] = BC[(1,-2,1,-2),(1,-2,-2,1)]
uW[core] = aWW[core]*K[Ws]-aWP[core]*K[core]
uE[core] = -aEE[core]*K[Es]+aEP[core]*K[core]
uN[core] = -aNN[core]*K[Ns]+aNP[core]*K[core]
uS[core] = aSS[core]*K[Ss]-aSP[core]*K[core]
#update the u BCs
for BC in (uW,uE,uN,uS):
BC[0,1:-1] = BC[1,1:-1]
BC[-1,1:-1] = BC[-2,1:-1]
BC[1:-1,0] = BC[1:-1,1]
BC[1:-1,-1] = BC[1:-1,-2]
BC[(0,-1,0,-1),(0,-1,-1,0)] = BC[(1,-2,1,-2),(1,-2,-2,1)]
X,Y = np.meshgrid(np.arange(ncols),np.arange(nrows))
uval = uW[core]+uE[core]
vval = uN[core]+uS[core]
#velmag = sqrt(uval**2 + vval**2)
#uval /= velmag
#vval /= velmag
#imshow_node_grid(mg, h)
figure(1)
mg = RasterModelGrid((nrows, ncols))
f1 = imshow_grid_at_node(mg, hR[core].flatten(), grid_units=('m', 'm'))
figure(2)
f2 = contour(X,Y,hR[core], locator=MaxNLocator(nbins=100))
# f2 = contour(X, Y, np.sqrt(uval**2+vval**2), locator=MaxNLocator(nbins=10))
clabel(f2)
quiver(X,Y,uval,vval)
| mit |
jandom/GromacsWrapper | gromacs/formats.py | 1 | 1486 | # GromacsWrapper: formats.py
# Copyright (c) 2009-2010 Oliver Beckstein <[email protected]>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
""":mod:`gromacs.formats` -- Accessing various files
=================================================
This module contains classes that represent data files on
disk. Typically one creates an instance and
- reads from a file using a :meth:`read` method, or
- populates the instance (in the simplest case with a :meth:`set`
method) and the uses the :meth:`write` method to write the data to
disk in the appropriate format.
For function data there typically also exists a :meth:`plot` method
which produces a graph (using matplotlib).
The module defines some classes that are used in other modules; they
do *not* make use of :mod:`gromacs.tools` or :mod:`gromacs.cbook` and
can be safely imported at any time.
.. SeeAlso::
This module gives access to a selection of classes from
:mod:`gromacs.fileformats`.
Classes
-------
.. autoclass:: XVG
:members:
.. autoclass:: NDX
:members:
.. autoclass:: uniqueNDX
:members:
.. autoclass:: MDP
:members:
.. autoclass:: ITP
:members:
.. autoclass:: XPM
:members:
.. autoclass:: TOP
:members:
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
__all__ = ["XVG", "MDP", "NDX", "uniqueNDX", "ITP", "XPM", "TOP"]
from .fileformats import XVG, MDP, NDX, uniqueNDX, ITP, XPM, TOP
| gpl-3.0 |
rseubert/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
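        # (Sketch of the identity used below, in the notation of the
        #  get_covariance docstring: with cov = W.T * D * W + sigma2 * I,
        #  where W holds the (possibly whitened) components and D the excess
        #  variances (explained variance minus noise variance), the Woodbury
        #  identity gives
        #      cov^{-1} = I/sigma2 - W.T (W W.T/sigma2 + D^{-1})^{-1} W / sigma2**2
        #  so only a small n_components x n_components matrix is inverted.)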
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
deepesch/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in next versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
poryfly/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(Y)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
Debaq/Triada | FullAxis_GUI/DB/BASE DE DATOS EXPERIMENTO/experimento 3/eduardo pailahual/medidor3.py | 27 | 3052 | import argparse
import sys
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import json
parser = argparse.ArgumentParser(description="Plot a recorded session (pitch/roll/yaw and LED channels) and crop it to a selected time window.")
parser.add_argument('message', type=str, help="path to the JSON data file to load")
args = parser.parse_args(sys.argv[1:])
data = []
New_data=[]
dt=[]
with open(args.message) as json_file:
data = json.load(json_file)
def graph(grid,d_tiempo):
plt.switch_backend('TkAgg') #default on my system
f = plt.figure(num=args.message, figsize=(20,15))
mng = plt._pylab_helpers.Gcf.figs.get(f.number, None)
print(New_data)
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.title(args.message)
if grid == 1:
tempo = d_tiempo
tempo_init = tempo[0]
tempo_end = tempo[-1]
gs1 = GridSpec(4, 1)
gs1.update(left=0.05, right=0.95, wspace=0.5, hspace=0.3, bottom=0.08)
ax1 = plt.subplot(gs1[0, :])
ax1.grid()
ax1.set_ylabel('Pitch',fontsize=8)
if grid ==1:
L1 = ax1.plot(d_tiempo,New_data['pitch'])
else:
L1 = ax1.plot(d_tiempo,data['pitch'])
ax2 = plt.subplot(gs1[1, :])
ax2.grid()
ax2.set_ylabel('Roll',fontsize=8)
if grid ==1:
L1 = ax2.plot(d_tiempo,New_data['roll'])
else:
L1 = ax2.plot(d_tiempo,data['roll'])
ax3 = plt.subplot(gs1[2, :])
ax3.grid()
ax3.set_ylabel('Yaw',fontsize=8)
if grid ==1:
L1 = ax3.plot(d_tiempo,New_data['yaw'])
else:
L1 = ax3.plot(d_tiempo,data['yaw'])
ax4 = plt.subplot(gs1[3, :])
ax4.grid()
ax4.set_ylabel('Tiempo',fontsize=8)
if grid ==1:
L1 = ax4.plot(d_tiempo,New_data['ledblue'])
L2 = ax4.plot(d_tiempo,New_data['ledred'])
else:
L1 = ax4.plot(d_tiempo,data['ledblue'])
L2 = ax4.plot(d_tiempo,data['ledred'])
plt.show()
def find_nearest(array,values):
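    # Return the index of the element of `array` closest to `values`
    # (vectorised argmin over the outer absolute differences).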
idx = np.abs(np.subtract.outer(array, values)).argmin(0)
return idx
def corte(init_cut,end_cut,a,b,c,d,e,f,g,h,i):
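    # Slice every recorded channel to the [init_cut:end_cut] sample window
    # and repack the pieces into a single dict.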
a=a[init_cut:end_cut]
b=b[init_cut:end_cut]
c=c[init_cut:end_cut]
d=d[init_cut:end_cut]
e=e[init_cut:end_cut]
f=f[init_cut:end_cut]
g=g[init_cut:end_cut]
h=h[init_cut:end_cut]
i=i[init_cut:end_cut]
datos={'roll':a,'pitch':b,'yaw':c, 'X':d, 'Y':e, 'Z':f,'time':g, 'ledblue':h, 'ledred':i}
return datos
def reset_tempo(var_in,var_out):
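    # Re-base the time vector so it starts at zero (subtract the first timestamp, rounded to 3 decimals).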
uni = var_in[0]
for t in range(0,len(var_in)):
var_out.append(round((var_in[t]-uni),3))
return var_out
graph(0,data['time'])
init_cut = float(input("tiempo inicial: "))
init_cuty = find_nearest(data['time'],init_cut)
end_cut = float(input("tiempo final: "))
end_cuty = find_nearest(data['time'],end_cut)
New_data=corte(init_cuty,end_cuty,data['pitch'],data['roll'],data['yaw'],data['X'],data['Y'],data['Z'],data['time'],data['ledblue'],data['ledred'])
data = []
print(data)
data = New_data
print(data)
dt = reset_tempo(New_data['time'],dt)
graph(0,dt)
| gpl-3.0 |
tebeka/arrow | python/pyarrow/tests/pandas_examples.py | 5 | 5149 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from datetime import date, time
import numpy as np
import pandas as pd
import pyarrow as pa
def dataframe_with_arrays(include_index=False):
"""
    Dataframe with numpy array columns of every possible primitive type.
Returns
-------
df: pandas.DataFrame
schema: pyarrow.Schema
Arrow schema definition that is in line with the constructed df.
"""
dtypes = [('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('f4', pa.float32()), ('f8', pa.float64())]
arrays = OrderedDict()
fields = []
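    # For every primitive dtype, build a list-typed column whose cells are arrays of
    # varying length, including a None entry to exercise null handling.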
for dtype, arrow_dtype in dtypes:
fields.append(pa.field(dtype, pa.list_(arrow_dtype)))
arrays[dtype] = [
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
]
fields.append(pa.field('str', pa.list_(pa.string())))
arrays['str'] = [
np.array([u"1", u"ä"], dtype="object"),
None,
np.array([u"1"], dtype="object"),
np.array([u"1", u"2", u"3"], dtype="object")
]
fields.append(pa.field('datetime64', pa.list_(pa.timestamp('ms'))))
arrays['datetime64'] = [
np.array(['2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ms]'),
None,
None,
np.array(['2007-07-13T02',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ms]'),
]
if include_index:
fields.append(pa.field('__index_level_0__', pa.int64()))
df = pd.DataFrame(arrays)
schema = pa.schema(fields)
return df, schema
def dataframe_with_lists(include_index=False, parquet_compatible=False):
"""
    Dataframe with list columns of every possible primitive type.
    Parameters
    ----------
    include_index : bool
        Append an explicit ``__index_level_0__`` field to the schema.
    parquet_compatible : bool
        Exclude types not supported by parquet.
    Returns
    -------
    df: pandas.DataFrame
    schema: pyarrow.Schema
        Arrow schema definition that is in line with the constructed df.
"""
arrays = OrderedDict()
fields = []
fields.append(pa.field('int64', pa.list_(pa.int64())))
arrays['int64'] = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[0, 1, 2, 3, 4],
None,
[],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9] * 2,
dtype=np.int64)[::2]
]
fields.append(pa.field('double', pa.list_(pa.float64())))
arrays['double'] = [
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.],
None,
[],
np.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.] * 2)[::2],
]
fields.append(pa.field('bytes_list', pa.list_(pa.binary())))
arrays['bytes_list'] = [
[b"1", b"f"],
None,
[b"1"],
[b"1", b"2", b"3"],
[],
]
fields.append(pa.field('str_list', pa.list_(pa.string())))
arrays['str_list'] = [
[u"1", u"ä"],
None,
[u"1"],
[u"1", u"2", u"3"],
[],
]
date_data = [
[],
[date(2018, 1, 1), date(2032, 12, 30)],
[date(2000, 6, 7)],
None,
[date(1969, 6, 9), date(1972, 7, 3)]
]
time_data = [
[time(23, 11, 11), time(1, 2, 3), time(23, 59, 59)],
[],
[time(22, 5, 59)],
None,
[time(0, 0, 0), time(18, 0, 2), time(12, 7, 3)]
]
temporal_pairs = [
(pa.date32(), date_data),
(pa.date64(), date_data),
(pa.time32('s'), time_data),
(pa.time32('ms'), time_data),
(pa.time64('us'), time_data)
]
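    # time64[ns] has no Parquet equivalent, so it is only included when Parquet
    # compatibility is not required.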
if not parquet_compatible:
temporal_pairs += [
(pa.time64('ns'), time_data),
]
for value_type, data in temporal_pairs:
field_name = '{}_list'.format(value_type)
field_type = pa.list_(value_type)
field = pa.field(field_name, field_type)
fields.append(field)
arrays[field_name] = data
if include_index:
fields.append(pa.field('__index_level_0__', pa.int64()))
df = pd.DataFrame(arrays)
schema = pa.schema(fields)
return df, schema
| apache-2.0 |
jorgehog/Deux-kMC | scripts/felix_cav/sequential_analyze.py | 1 | 3360 | import sys
import os
import numpy as np
from os.path import join
from matplotlib.pylab import *
sys.path.append(join(os.getcwd(), ".."))
from parse_h5_output import ParseKMCHDF5
from intercombinatorzor import ICZ
def find_front_pos(heights):
return heights.mean()
#9 14 17 39 43 56 59 61 62 64
def main():
input_file = sys.argv[1]
skiplist = []
if len(sys.argv) > 2:
skiplist = [int(x) for x in sys.argv[2:]]
parser = ParseKMCHDF5(input_file)
def skip(data):
return data.attrs["flux"] != 2.40
every = 1000
thermRatio = 0.25
l = None
n_entries = 0
for data, L, W, run_id in parser:
if skip(data):
continue
if not l:
l = len(data["time"])
n_entries += 1
therm = l*thermRatio
nbins = 20
cmat = np.zeros(shape=(n_entries, nbins))
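    # cmat accumulates, per run and per y-bin, the time-weighted inverse gap height (dt/dh),
    # which is later normalised by the total accumulated time.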
dy = W/float(nbins)
combinator = ICZ("Time", "ys")
Cmean = 0
cmeancount = 0
entry_count = 0
for data, L, W, run_id in parser:
if skip(data):
continue
conf_height = data.attrs["height"]
stored_heights = data["stored_heights"]
stored_particles = data["stored_particles"]
stored_heights_indices = sorted(stored_heights, key=lambda x: int(x))
time = data["time"][()]
ys_vec = np.zeros(len(time)/every)
        # Shift by one sample to translate interval start times into end times
t_prev = 0
tot_weight = 0
for hi, heights_id in enumerate(stored_heights_indices):
if hi % every != 0:
continue
heights = stored_heights[heights_id][()].transpose()
ys = find_front_pos(heights)
ys_vec[hi/every] = ys
if hi < len(time) - 1:
t_new = time[hi+1]
if hi >= therm:
if heights_id in stored_particles:
particles = stored_particles[heights_id][()]
else:
particles = []
dt = t_new - t_prev
for x, y, _ in particles:
xl = round(x)
yl = round(y)
dh = conf_height - heights[xl, yl] - 1
cmat[entry_count, int((y+0.5)/dy)] += dt/dh
tot_weight += dt
t_prev = t_new
if hi % 100 == 0:
sys.stdout.flush()
print "\r%d/%d" % (hi, len(stored_heights)),
cmat[entry_count, :] /= tot_weight
sys.stdout.flush()
print
print "\rfin %d / %d" % (entry_count+1, n_entries)
if skiplist:
if entry_count + 1 in skiplist:
ans = "asd"
else:
ans = ""
else:
plot(time[::every], ys_vec)
show()
ans = raw_input("discard? (n)")
if ans == "":
combinator.feed(time[::every], ys_vec)
Cmean += cmat[entry_count, :]
cmeancount += 1
entry_count += 1
print
Cmean /= cmeancount
ti, ys_veci = combinator.intercombine("Time", "ys")
np.save("/tmp/FelixSeqC_t.npy", ti)
np.save("/tmp/FelixSeqC_ys.npy", ys_veci)
np.save("/tmp/FelixSeqC_C.npy", Cmean)
if __name__ == "__main__":
main()
| gpl-3.0 |
lucabaldini/ximpol | ximpol/examples/grb_swift_lc.py | 1 | 2862 | #!/usr/bin/env python
#
# Copyright (C) 2015--2016, the ximpol team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import numpy
import random
from ximpol.utils.logging_ import logger
from ximpol.utils.matplotlib_ import pyplot as plt
from ximpol.config.grb_swift_download import download_swift_grb_lc_file
from ximpol.config.grb_swift_download import get_all_swift_grb_names
from ximpol.config.grb_utils import parse_light_curve
from ximpol.utils.matplotlib_ import overlay_tag, save_current_figure
def plot_swift_lc(grb_list,show=True):
"""Plots Swift GRB light curves.
"""
plt.figure(figsize=(10, 8), dpi=80)
plt.title('Swift XRT light curves')
num_grb = 0
for grb_name in grb_list:
flux_outfile = download_swift_grb_lc_file(grb_name, min_obs_time=21600)
if flux_outfile is not None:
integral_flux_spline = parse_light_curve(flux_outfile)
if integral_flux_spline is not None:
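                # Highlight GRB 130427A in red; draw every other burst in a random shade of grey.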
if grb_name == 'GRB 130427A':
integral_flux_spline.plot(num_points=1000,logx=True,\
logy=True,show=False,\
color="red",linewidth=1.0)
num_grb += 1
else:
c = random.uniform(0.4,0.8)
integral_flux_spline.plot(num_points=1000,logx=True,\
logy=True,show=False,\
color='%f'%c,linewidth=1.0)
num_grb += 1
else:
continue
logger.info('%i GRBs included in the plot.'%num_grb)
if show:
plt.show()
def main(interactive=False):
"""Test the script plotting the light curve og GRB 130427A
"""
    # Use all the GRBs; alternatively, swap in the short list below:
grb_list = get_all_swift_grb_names()
#grb_list = ['GRB 130427A','GRB 050124']
plot_swift_lc(grb_list,show=False)
overlay_tag()
save_current_figure('Swift_XRT_light_curves',
clear=False)
if interactive:
plt.show()
if __name__=='__main__':
main(interactive=sys.flags.interactive)
| gpl-3.0 |
255BITS/HyperGAN | examples/common.py | 1 | 14294 | import matplotlib.pyplot as plt
import matplotlib.style
import matplotlib as mpl
import argparse
import tensorflow as tf
import hypergan as hg
import hyperchamber as hc
import numpy as np
import random
from hypergan.cli import CLI
from hypergan.gan_component import GANComponent
from hypergan.search.random_search import RandomSearch
from hypergan.generators.base_generator import BaseGenerator
from hypergan.samplers.base_sampler import BaseSampler
class ArgumentParser:
def __init__(self, description, require_directory=True):
self.require_directory = require_directory
self.parser = argparse.ArgumentParser(description=description, add_help=True)
self.add_global_arguments()
self.add_search_arguments()
self.add_train_arguments()
def add_global_arguments(self):
parser = self.parser
parser.add_argument('action', action='store', type=str, help='One of ["train", "search"]')
if self.require_directory:
parser.add_argument('directory', action='store', type=str, help='The location of your data. Subdirectories are treated as different classes. You must have at least 1 subdirectory.')
parser.add_argument('--config', '-c', type=str, default='default', help='config name')
parser.add_argument('--device', '-d', type=str, default='/gpu:0', help='In the form "/gpu:0", "/cpu:0", etc. Always use a GPU (or TPU) to train')
parser.add_argument('--batch_size', '-b', type=int, default=32, help='Number of samples to include in each batch. If using batch norm, this needs to be preserved when in server mode')
parser.add_argument('--steps', type=int, default=1000000, help='Number of steps to train for.')
parser.add_argument('--noviewer', dest='viewer', action='store_false', help='Disables the display of samples in a window.')
parser.add_argument('--save_samples', dest='save_samples', action='store_true', help='Saves samples to disk.')
def add_search_arguments(self):
parser = self.parser
parser.add_argument('--config_list', '-m', type=str, default=None, help='config list name')
parser.add_argument('--search_output', '-o', type=str, default="search.csv", help='output file for search results')
def add_train_arguments(self):
parser = self.parser
parser.add_argument('--sample_every', type=int, default=50, help='Samples the model every n epochs.')
parser.add_argument('--save_every', type=int, default=1000, help='Samples the model every n epochs.')
def add_image_arguments(self):
parser = self.parser
parser.add_argument('--crop', type=bool, default=False, help='If your images are perfectly sized you can skip cropping.')
parser.add_argument('--format', '-f', type=str, default='png', help='jpg or png')
parser.add_argument('--size', '-s', type=str, default='64x64x3', help='Size of your data. For images it is widthxheightxchannels.')
parser.add_argument('--zoom', '-z', type=int, default=1, help='Zoom level')
parser.add_argument('--sampler', type=str, default=None, help='Select a sampler. Some choices: static_batch, batch, grid, progressive')
return parser
def parse_args(self):
return self.parser.parse_args()
class CustomGenerator(BaseGenerator):
def create(self):
gan = self.gan
config = self.config
ops = self.ops
end_features = config.end_features or 1
ops.describe('custom_generator')
net = gan.inputs.x
net = ops.linear(net, end_features)
net = ops.lookup('tanh')(net)
self.sample = net
return net
class Custom2DGenerator(BaseGenerator):
def create(self):
gan = self.gan
config = self.config
ops = self.ops
end_features = config.end_features or 1
ops.describe('custom_generator')
net = gan.latent.sample
for i in range(2):
net = ops.linear(net, 16)
net = ops.lookup('bipolar')(net)
net = ops.linear(net, end_features)
print("-- net is ", net)
self.sample = net
return net
class CustomDiscriminator(BaseGenerator):
def build(self, net):
gan = self.gan
config = self.config
ops = self.ops
end_features = 1
x = gan.inputs.x
y = gan.inputs.y
g = gan.generator.sample
gnet = tf.concat(axis=1, values=[x,g])
ynet = tf.concat(axis=1, values=[x,y])
net = tf.concat(axis=0, values=[ynet, gnet])
net = ops.linear(net, 128)
net = tf.nn.tanh(net)
self.sample = net
return net
class Custom2DDiscriminator(BaseGenerator):
def __init__(self, gan, config, g=None, x=None, name=None, input=None, reuse=None, features=[], skip_connections=[]):
self.x = x
self.g = g
GANComponent.__init__(self, gan, config, name=name, reuse=reuse)
def create(self):
gan = self.gan
if self.x is None:
self.x = gan.inputs.x
if self.g is None:
self.g = gan.generator.sample
net = tf.concat(axis=0, values=[self.x,self.g])
net = self.build(net)
self.sample = net
return net
def build(self, net):
gan = self.gan
config = self.config
ops = self.ops
layers=2
end_features = 1
for i in range(layers):
net = ops.linear(net, 16)
net = ops.lookup('bipolar')(net)
net = ops.linear(net, 1)
self.sample = net
return net
def reuse(self, net):
self.ops.reuse()
net = self.build(net)
self.ops.stop_reuse()
return net
class Custom2DSampler(BaseSampler):
def sample(self, filename, save_samples):
gan = self.gan
generator = gan.generator.sample
sess = gan.session
config = gan.config
x_v, z_v = sess.run([gan.inputs.x, gan.latent.sample])
sample = sess.run(generator, {gan.inputs.x: x_v, gan.latent.sample: z_v})
X, Y = np.meshgrid(np.arange(-1.2, 1.2, .1), np.arange(-1.2, 1.2, .1))
U = np.cos(X)
V = np.sin(Y)
mpl.style.use('classic')
plt.clf()
#fig = plt.figure(figsize=(3,3))
fig = plt.figure()
plt.scatter(*zip(*x_v), c='b')
plt.scatter(*zip(*sample), c='r')
q = plt.quiver(X,Y,U,V, color='k', units='width')
qk = plt.quiverkey(q, 0.9, 0.9, 2, r'$2 \frac{m}{s}$', labelpos='E', coordinates='figure')
#plt.xlim([-2, 2])
#plt.ylim([-2, 2])
#plt.ylabel("z")
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
#plt.savefig(filename)
self.plot(data, filename, save_samples)
return [{'image': filename, 'label': '2d'}]
class Custom2DInputDistribution:
def __init__(self, args):
with tf.device(args.device):
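            # circle(): project Gaussian samples onto the unit circle by dividing by their norm.
            # modes(): snap uniform samples onto a 0.5-spaced grid to create discrete modes.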
def circle(x):
spherenet = tf.square(x)
spherenet = tf.reduce_sum(spherenet, 1)
lam = tf.sqrt(spherenet)
return x/tf.reshape(lam,[int(lam.get_shape()[0]), 1])
def modes(x):
shape = x.get_shape()
return tf.round(x*2)/2.0#+tf.random_normal(shape, 0, 0.04)
if args.distribution == 'circle':
x = tf.random_normal([args.batch_size, 2])
x = circle(x)
elif args.distribution == 'modes':
x = tf.random_uniform([args.batch_size, 2], -1, 1)
x = modes(x)
elif args.distribution == 'sin':
x = tf.random_uniform((1, args.batch_size), -10.5, 10.5 )
x = tf.transpose(x)
r_data = tf.random_normal((args.batch_size,1), mean=0, stddev=0.1)
xy = tf.sin(0.75*x)*7.0+x*0.5+r_data*1.0
x = tf.concat([xy,x], 1)/16.0
elif args.distribution == 'arch':
offset1 = tf.random_uniform((1, args.batch_size), -10, 10 )
xa = tf.random_uniform((1, 1), 1, 4 )
xb = tf.random_uniform((1, 1), 1, 4 )
x1 = tf.random_uniform((1, args.batch_size), -1, 1 )
xcos = tf.cos(x1*np.pi + offset1)*xa
xsin = tf.sin(x1*np.pi + offset1)*xb
x = tf.transpose(tf.concat([xcos,xsin], 0))/16.0
elif args.distribution == 'static-point':
x = tf.ones([args.batch_size, 2])
self.x = x
self.xy = tf.zeros_like(self.x)
def batch_diversity(net):
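    # Total L1 deviation of the batch from its per-feature mean: subtract the (tiled)
    # batch mean from every sample and sum the absolute differences.
    # Larger values indicate more diverse generator outputs.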
bs = int(net.get_shape()[0])
avg = tf.reduce_mean(net, axis=0)
s = [int(x) for x in avg.get_shape()]
avg = tf.reshape(avg, [1, s[0], s[1], s[2]])
tile = [1 for x in net.get_shape()]
tile[0] = bs
avg = tf.tile(avg, tile)
net -= avg
return tf.reduce_sum(tf.abs(net))
def distribution_accuracy(a, b):
"""
Each point of a is measured against the closest point on b. Distance differences are added together.
This works best on a large batch of small inputs."""
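    # Builds an [N, N, d] tensor of |a_i - b_j|, takes the minimum over b for each
    # coordinate, then sums over coordinates and over the batch.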
tiled_a = a
tiled_a = tf.reshape(tiled_a, [int(tiled_a.get_shape()[0]), 1, int(tiled_a.get_shape()[1])])
tiled_a = tf.tile(tiled_a, [1, int(tiled_a.get_shape()[0]), 1])
tiled_b = b
tiled_b = tf.reshape(tiled_b, [1, int(tiled_b.get_shape()[0]), int(tiled_b.get_shape()[1])])
tiled_b = tf.tile(tiled_b, [int(tiled_b.get_shape()[0]), 1, 1])
difference = tf.abs(tiled_a-tiled_b)
difference = tf.reduce_min(difference, axis=1)
difference = tf.reduce_sum(difference, axis=1)
return tf.reduce_sum(difference, axis=0)
def batch_accuracy(a, b):
"Difference from a to b. Meant for reconstruction measurements."
difference = tf.abs(a-b)
difference = tf.reduce_min(difference, axis=1)
difference = tf.reduce_sum(difference, axis=1)
return tf.reduce_sum( tf.reduce_sum(difference, axis=0) , axis=0)
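# Hypothetical usage sketch for distribution_accuracy (names and shapes are assumed, not from the original file):
#   a = tf.random_normal([64, 2]); b = tf.random_normal([64, 2])
#   spread = distribution_accuracy(a, b)  # scalar: per-coordinate nearest-match distance, summed over the batch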
class TextInput:
def __init__(self, config, batch_size, one_hot=False):
self.lookup = None
reader = tf.TextLineReader()
filename_queue = tf.train.string_input_producer(["chargan.txt"])
key, x = reader.read(filename_queue)
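        # Pad each line to 64 characters, split it into single characters, map them to
        # vocabulary indices, then either one-hot encode or rescale to [-1, 1].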
vocabulary = self.get_vocabulary()
table = tf.contrib.lookup.string_to_index_table_from_tensor(
mapping = vocabulary, default_value = 0)
x = tf.string_join([x, tf.constant(" " * 64)])
x = tf.substr(x, [0], [64])
x = tf.string_split(x,delimiter='')
x = tf.sparse_tensor_to_dense(x, default_value=' ')
x = tf.reshape(x, [64])
x = table.lookup(x)
self.one_hot = one_hot
if one_hot:
x = tf.one_hot(x, len(vocabulary))
x = tf.cast(x, dtype=tf.float32)
x = tf.reshape(x, [1, int(x.get_shape()[0]), int(x.get_shape()[1]), 1])
else:
x = tf.cast(x, dtype=tf.float32)
x -= len(vocabulary)/2.0
x /= len(vocabulary)/2.0
x = tf.reshape(x, [1,1, 64, 1])
num_preprocess_threads = 8
x = tf.train.shuffle_batch(
[x],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity= 5000,
min_after_dequeue=500,
enqueue_many=True)
self.x = x
self.table = table
def inputs(self):
return [self.x]
def get_vocabulary(self):
vocab = list("~()\"'&+#@/789zyxwvutsrqponmlkjihgfedcba ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456:-,;!?.")
return vocab
def np_one_hot(index, length):
return np.eye(length)[index]
def get_character(self, data):
return self.get_lookup_table()[data]
def get_lookup_table(self):
if self.lookup is None:
vocabulary = self.get_vocabulary()
values = np.arange(len(vocabulary))
lookup = {}
if self.one_hot:
for i, key in enumerate(vocabulary):
lookup[key]=self.np_one_hot(values[i], len(values))
else:
for i, key in enumerate(vocabulary):
lookup[key]=values[i]
#reverse the hash
lookup = {i[1]:i[0] for i in lookup.items()}
self.lookup = lookup
return self.lookup
def text_plot(self, size, filename, data, x):
bs = x.shape[0]
data = np.reshape(data, [bs, -1])
x = np.reshape(x, [bs, -1])
plt.clf()
plt.figure(figsize=(2,2))
data = np.squeeze(data)
plt.plot(x)
plt.plot(data)
plt.xlim([0, size])
plt.ylim([-2, 2.])
plt.ylabel("Amplitude")
plt.xlabel("Time")
plt.savefig(filename)
def sample_output(self, val):
vocabulary = self.get_vocabulary()
if self.one_hot:
vals = [ np.argmax(r) for r in val ]
ox_val = [vocabulary[obj] for obj in list(vals)]
string = "".join(ox_val)
return string
else:
val = np.reshape(val, [-1])
val *= len(vocabulary)/2.0
val += len(vocabulary)/2.0
val = np.round(val)
val = np.maximum(0, val)
val = np.minimum(len(vocabulary)-1, val)
ox_val = [self.get_character(obj) for obj in list(val)]
string = "".join(ox_val)
return string
def lookup_sampler(name):
return CLI.sampler_for(name, name)
def parse_size(size):
width = int(size.split("x")[0])
height = int(size.split("x")[1])
channels = int(size.split("x")[2])
return [width, height, channels]
def lookup_config(args):
if args.action != 'search':
return hg.configuration.Configuration.load(args.config+".json")
def random_config_from_list(config_list_file):
""" Chooses a random configuration from a list of configs (separated by newline) """
lines = tuple(open(config_list_file, 'r'))
config_file = random.choice(lines).strip()
print("[hypergan] config file chosen from list ", config_list_file, ' file:', config_file)
return hg.configuration.Configuration.load(config_file+".json")
| mit |
r-mart/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
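            # Standardize features and targets using statistics from the training split.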
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
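            # Scale the number of epochs so the total number of SGD updates stays roughly
            # constant (~10**4 samples seen) across training-set sizes.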
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
zasdfgbnm/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
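  # One-standard-deviation band around the predicted mean.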
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ligovirgo/seismon | RfPrediction/old/cause_lockstate_2.py | 2 | 26468 | #!/usr/bin/python
from __future__ import division
import os, sys, glob, optparse, warnings, time, json
import numpy as np
import subprocess
from subprocess import Popen
from lxml import etree
from scipy.interpolate import interp1d
from gwpy.timeseries import TimeSeries
import lal.gpstime
#from seismon import (eqmon, utils)
import matplotlib
matplotlib.use("AGG")
matplotlib.rcParams.update({'font.size': 18})
from matplotlib import pyplot as plt
from matplotlib import cm
def parse_commandline():
"""@parse the options given on the command-line.
"""
parser = optparse.OptionParser(usage=__doc__)
parser.add_option("-t", "--time_after_p_wave", help="time to check for lockloss status after p wave arrival.",
default = 3600)
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Run verbosely. (Default: False)")
opts, args = parser.parse_args()
# show parameters
if opts.verbose:
print >> sys.stderr, ""
print >> sys.stderr, "running network_eqmon..."
print >> sys.stderr, "version: %s"%__version__
print >> sys.stderr, ""
print >> sys.stderr, "***************** PARAMETERS ********************"
for o in opts.__dict__.items():
print >> sys.stderr, o[0]+":"
print >> sys.stderr, o[1]
print >> sys.stderr, ""
return opts
## LHO
rms_toggle = ''
os.system('mkdir -p /home/eric.coughlin/H1O1/')
os.system('mkdir -p /home/eric.coughlin/public_html/lockloss_threshold_plots/LHO/')
for direction in ['Z','X','Y']:
if rms_toggle == "":
channel = 'H1:ISI-GND_STS_HAM2_{0}_DQ'.format(direction)
elif rms_toggle == "RMS_":
channel = 'H1:ISI-GND_STS_HAM5_{0}_BLRMS_30M_100M'.format(direction)
H1_lock_time_list = []
H1_lockloss_time_list = []
H1_peak_ground_velocity_list = []
hdir = os.environ["HOME"]
options = parse_commandline()
predicted_peak_ground_velocity_list = []
datafileH1 = open('{0}/gitrepo/seismon/RfPrediction/data/LHO_O1_{1}{2}_4.txt'.format(hdir, rms_toggle, direction), 'r')
resultfileH1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LHO_lockstatus_{0}{1}_4.txt'.format(rms_toggle, direction), 'w')
H1_channel_lockstatus_data = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/segs_Locked_H_1126569617_1136649617.txt', 'r')
# This next section of code is where the data is seperated into two lists to make this data easier to search through and process.
for item in (line.strip().split() for line in H1_channel_lockstatus_data):
H1_lock_time = item[0]
H1_lockloss_time = item[1]
H1_lock_time_list.append(float(H1_lock_time))
H1_lockloss_time_list.append(float(H1_lockloss_time))
#resultfileH1.write('{0:^20} {1:^20} {2:^20} {3:^20} \n'.format('eq arrival time','pw arrival time','peak ground velocity','lockloss'))
for column in ( line.strip().split() for line in datafileH1):
eq_time = column[0] # This is the time that the earthquake was detected
eq_mag = column[1]
pw_arrival_time = column[2] #this is the arrival time of the pwave
sw_arrival_time = column[3]
eq_distance = column[12]
eq_depth = column[13]
rw_arrival_time = column[5] #this is the arrival time of rayleigh wave
peak_ground_velocity = column[14] # this is the peak ground velocity during the time of the earthquake.
predicted_peak_ground_velocity = column[7]
predicted_peak_ground_velocity_list.append(float(predicted_peak_ground_velocity))
        # The next() function is designed to take a list and find the first item in that list that matches the condition. If no item in the list matches, a default value can be supplied, which prevents the program from raising an error.
#H1_lock_time = next((item for item in H1_lock_time_list if min(H1_lock_time_list, key=lambda x:abs(x-float(pw_arrival_time)))),[None])
#H1_lockloss_time = next((item for item in H1_lockloss_time_list if min(H1_lockloss_time_list, key=lambda x:abs(x-float(float(pw_arrival_time)+float(options.time_after_p_wave))))),[None])
H1_lock_time = min(H1_lock_time_list, key=lambda x:abs(x-float(pw_arrival_time)))
H1_lockloss_time = min(H1_lockloss_time_list, key=lambda x:abs(x-float(float(pw_arrival_time) + float(options.time_after_p_wave))))
lockloss = ""
        if (H1_lock_time <= float(pw_arrival_time) and H1_lockloss_time <= float(float(pw_arrival_time) + float(options.time_after_p_wave))): # Check whether the interferometer was locked at the P-wave arrival and, if it was, whether it lost lock within the check window after the earthquake.
lockloss = "Y"
resultfileH1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss))
elif (H1_lock_time <= float(pw_arrival_time) and H1_lockloss_time > float(float(pw_arrival_time) + float(options.time_after_p_wave))):
lockloss = "N"
resultfileH1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss))
elif (H1_lock_time > float(pw_arrival_time)):
lockloss = "Z"
resultfileH1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss))
datafileH1.close()
resultfileH1.close()
H1_channel_lockstatus_data.close()
eq_time_list = []
locklosslist = []
pw_arrival_list = []
peak_acceleration_list = []
peak_displacement_list = []
eq_mag_list = []
eq_distance_list = []
eq_depth_list = []
resultfileplotH1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LHO_lockstatus_{0}{1}_4.txt'.format(rms_toggle, direction), 'r')
for item in (line.strip().split() for line in resultfileplotH1):
eq_time = item[0]
pw_arrival = item[1]
peakgroundvelocity = item[2]
eq_mag = item[3]
eq_distance = item[4]
eq_depth = item[5]
lockloss = item[6]
H1_peak_ground_velocity_list.append(float(peakgroundvelocity))
locklosslist.append(lockloss)
eq_time_list.append(eq_time)
pw_arrival_list.append(pw_arrival)
eq_mag_list.append(eq_mag)
eq_distance_list.append(eq_distance)
eq_depth_list.append(eq_depth)
H1_binary_file = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LHO_O1_binary_{1}{0}_4.txt'.format(direction, rms_toggle), 'w')
for eq_time, pw_arrival,peak_ground_velocity,eq_mag,eq_distance,eq_depth, lockloss in zip(eq_time_list, pw_arrival_list,H1_peak_ground_velocity_list,eq_mag_list,eq_distance_list,eq_depth_list, locklosslist):
if lockloss == "Y":
lockloss_binary = '1'
H1_binary_file.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss_binary))
elif lockloss == "N":
lockloss_binary = '0'
H1_binary_file.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20}\n'.format(eq_time,pw_arrival,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss_binary))
else:
pass
H1_binary_file.close()
locklosslistZ = []
locklosslistY = []
locklosslistN = []
eq_time_list_Z = []
eq_time_list_N = []
eq_time_list_Y = []
H1_peak_ground_velocity_list_Z = []
H1_peak_ground_velocity_list_N = []
H1_peak_ground_velocity_list_Y = []
peak_ground_acceleration_list_Z = []
peak_ground_acceleration_list_N = []
peak_ground_acceleration_list_Y = []
H1_peak_ground_velocity_sorted_list, locklosssortedlist, predicted_peak_ground_velocity_sorted_list = (list(t) for t in zip(*sorted(zip(H1_peak_ground_velocity_list, locklosslist, predicted_peak_ground_velocity_list))))
num_lock_list = []
YN_peak_list = []
for sortedpeak, sortedlockloss in zip(H1_peak_ground_velocity_sorted_list, locklosssortedlist):
if sortedlockloss == "Y":
YN_peak_list.append(sortedpeak)
num_lock_list.append(1)
elif sortedlockloss == "N":
YN_peak_list.append(sortedpeak)
num_lock_list.append(0)
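    # Running fraction of lockloss events among all earthquakes up to each peak ground velocity.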
num_lock_prob_cumsum = np.divide(np.cumsum(num_lock_list), np.cumsum(np.ones(len(num_lock_list))))
f, axarr = plt.subplots(1)
for t,time,peak, lockloss in zip(range(len(eq_time_list)),eq_time_list,H1_peak_ground_velocity_list,locklosslist):
if lockloss == "Z":
eq_time_list_Z.append(t)
H1_peak_ground_velocity_list_Z.append(peak)
locklosslistZ.append(lockloss)
elif lockloss == "N":
eq_time_list_N.append(t)
H1_peak_ground_velocity_list_N.append(peak)
locklosslistN.append(lockloss)
elif lockloss == "Y":
eq_time_list_Y.append(t)
H1_peak_ground_velocity_list_Y.append(peak)
locklosslistY.append(lockloss)
axarr.plot(eq_time_list_N, H1_peak_ground_velocity_list_N, 'go', label='locked at earthquake(eq)')
axarr.plot(eq_time_list_Y, H1_peak_ground_velocity_list_Y, 'ro', label='lockloss at earthquake(eq)')
axarr.set_title('H1 magnitude 4 Lockstatus Plot')
axarr.set_yscale('log')
axarr.set_xlabel('earthquake count(eq)')
axarr.set_ylabel('peak ground velocity(m/s)')
axarr.legend(loc='best')
#f.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LHO/lockstatus_LHO_{0}{1}.png'.format(rms_toggle, direction))
f.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockstatus_LHO_{0}{1}_4.pdf'.format(rms_toggle, direction))
#plt.figure(2)
#plt.plot(eq_time_list_N, peak_ground_acceleration_list_N, 'go', label='locked at earthquake(eq)')
#plt.plot(eq_time_list_Y, peak_ground_acceleration_list_Y, 'ro', label='lockloss at earthquake(eq)')
#plt.title('H1 Lockstatus Plot(acceleration)')
#plt.yscale('log')
#plt.xlabel('earthquake count(eq)')
#plt.ylabel('peak ground acceleration(m/s)')
#plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockstatus_acceleration_LHO_{0}{1}.png'.format(rms_toggle, direction))
#plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockstatus_acceleration_LHO_{0}{1}.png'.format(rms_toggle, direction))
plt.clf()
#plt.figure(3)
#plt.plot(H1_peak_ground_velocity_sorted_list, predicted_peak_ground_velocity_sorted_list, 'o', label='actual vs predicted')
#plt.title('H1 actual vs predicted ground velocity')
#plt.xscale('log')
#plt.yscale('log')
#plt.xlabel('peak ground velocity(m/s)')
#plt.ylabel('predicted peak ground velocity(m/s)')
#plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LHO/check_prediction_LHO_{0}{1}.png'.format(rms_toggle, direction))
#plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/check_prediction_LHO_{0}{1}.png'.format(rms_toggle, direction))
#plt.clf()
threshold_file_H1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/threshhold_data_{0}{1}_4.txt'.format(rms_toggle, direction), 'w')
num_of_lockloss = len(locklosslistY)
total_lockstatus = num_of_lockloss + len(locklosslistN)
print(total_lockstatus)
total_lockstatus_all = num_of_lockloss + len(locklosslistN) + len(locklosslistZ)
total_percent_lockloss = num_of_lockloss / total_lockstatus
threshold_file_H1.write('The percentage of total locklosses is {0}% \n'.format(total_percent_lockloss * 100))
threshold_file_H1.write('The total number of earthquakes is {0}. \n'.format(total_lockstatus_all))
eqcount_50 = 0
eqcount_75 = 0
eqcount_90 = 0
eqcount_95 = 0
for item, thing in zip(num_lock_prob_cumsum, YN_peak_list):
if item >= .5:
eqcount_50 = eqcount_50 + 1
if item >= .75:
eqcount_75 = eqcount_75 + 1
if item >= .9:
eqcount_90 = eqcount_90 + 1
if item >= .95:
eqcount_95 = eqcount_95 + 1
threshold_file_H1.write('The number of earthquakes above 50 percent is {0}. \n'.format(eqcount_50))
threshold_file_H1.write('The number of earthquakes above 75 percent is {0}. \n'.format(eqcount_75))
threshold_file_H1.write('The number of earthquakes above 90 percent is {0}. \n'.format(eqcount_90))
threshold_file_H1.write('The number of earthquakes above 95 percent is {0}. \n'.format(eqcount_95))
probs = [0.5, 0.75, 0.9, 0.95]
num_lock_prob_cumsum_sort = np.unique(num_lock_prob_cumsum)
YN_peak_list_sort = np.unique(YN_peak_list)
num_lock_prob_cumsum_sort, YN_peak_list_sort = zip(*sorted(zip(num_lock_prob_cumsum_sort, YN_peak_list_sort)))
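    # Interpolate the empirical lockloss-probability curve to estimate the ground-velocity
    # threshold corresponding to each probability level.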
thresholdsf = interp1d(num_lock_prob_cumsum_sort,YN_peak_list_sort, bounds_error=False)
for item in probs:
threshold = thresholdsf(item)
        threshold_file_H1.write('The threshold at {0}% is {1}(m/s) \n'.format(item * 100, threshold))
threshold_file_H1.write('The number of times of locklosses is {0}. \n'.format(len(locklosslistY)))
threshold_file_H1.write('The number of times of no locklosses is {0}. \n'.format(len(locklosslistN)))
threshold_file_H1.write('The number of times of not locked is {0}. \n'.format(len(locklosslistZ)))
threshold_file_H1.close()
plt.figure(4)
plt.plot(YN_peak_list_sort, num_lock_prob_cumsum_sort, 'kx', label='probability of lockloss')
plt.title('H1 Lockloss Probability')
plt.xscale('log')
plt.grid(True)
plt.xlabel('peak ground velocity (m/s)')
    plt.ylabel('Lockloss Probability')
plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LHO/lockloss_probablity_LHO_{0}{1}.png'.format(rms_toggle, direction))
plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockloss_probablity_LHO_{0}{1}_4.pdf'.format(rms_toggle, direction))
plt.clf()
## LLO
os.system('mkdir -p /home/eric.coughlin/L1O1/')
os.system('mkdir -p /home/eric.coughlin/public_html/lockloss_threshold_plots/LLO/')
for direction in ['Z','X','Y']:
if rms_toggle == "":
channel = 'L1:ISI-GND_STS_HAM2_{0}_DQ'.format(direction)
elif rms_toggle == "RMS_":
channel = 'L1:ISI-GND_STS_HAM5_{0}_BLRMS_30M_100M'.format(direction)
L1_lock_time_list = []
L1_lockloss_time_list = []
options = parse_commandline()
predicted_peak_ground_velocity_list = []
H1_peak_ground_velocity_list =[]
datafileL1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LLO_O1_{0}{1}_4.txt'.format(rms_toggle, direction), 'r')
resultfileL1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LLO_lockstatus_{0}{1}_4.txt'.format(rms_toggle, direction), 'w')
L1_channel_lockstatus_data = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/segs_Locked_L_1126569617_1136649617.txt', 'r')
for item in (line.strip().split() for line in L1_channel_lockstatus_data):
L1_lock_time = item[0]
L1_lockloss_time = item[1]
L1_lock_time_list.append(float(L1_lock_time))
L1_lockloss_time_list.append(float(L1_lockloss_time))
#resultfileL1.write('{0:^20} {1:^20} {2:^20} {3:^20} \n'.format('eq arrival time','pw arrival time','peak ground velocity','lockloss'))
for column in ( line.strip().split() for line in datafileL1):
eq_time = column[0]
eq_mag = column[1]
pw_arrival_time = column[2]
sw_arrival_time = column[3]
eq_distance = column[12]
eq_depth = column[13]
rw_arrival_time = column[5]
peak_ground_velocity = column[14]
predicted_peak_ground_velocity = column[7]
predicted_peak_ground_velocity_list.append(float(predicted_peak_ground_velocity))
#L1_lock_time = next((item for item in L1_lock_time_list if item <= float(pw_arrival_time)),[None])
#L1_lockloss_time = next((item for item in L1_lockloss_time_list if item <= float(float(pw_arrival_time) + float(options.time_after_p_wave))),[None])
L1_lock_time = min(L1_lock_time_list, key=lambda x:abs(x-float(pw_arrival_time)))
L1_lockloss_time = min(L1_lockloss_time_list, key=lambda x:abs(x-float(float(pw_arrival_time) + float(options.time_after_p_wave))))
lockloss = ""
if (L1_lock_time <= float(pw_arrival_time) and L1_lockloss_time <= float(float(pw_arrival_time) + float(options.time_after_p_wave))):
lockloss = "Y"
resultfileL1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss))
elif (L1_lock_time <= float(pw_arrival_time) and L1_lockloss_time > float(float(pw_arrival_time) + float(options.time_after_p_wave))):
lockloss = "N"
resultfileL1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss))
elif (L1_lock_time > float(pw_arrival_time)):
lockloss = "Z"
resultfileL1.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival_time,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss))
datafileL1.close()
resultfileL1.close()
L1_channel_lockstatus_data.close()
eq_time_list = []
locklosslist = []
pw_arrival_list = []
peak_acceleration_list =[]
peak_displacement_list = []
eq_mag_list = []
eq_distance_list = []
eq_depth_list = []
resultfileplotL1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LLO_lockstatus_{0}{1}_4.txt'.format(rms_toggle, direction), 'r')
for item in (line.strip().split() for line in resultfileplotL1):
eq_time = item[0]
pw_arrival = item[1]
peakgroundvelocity = item[2]
eq_mag = item[3]
eq_distance = item[4]
eq_depth = item[5]
lockloss = item[6]
H1_peak_ground_velocity_list.append(float(peakgroundvelocity))
locklosslist.append(lockloss)
eq_time_list.append(eq_time)
pw_arrival_list.append(pw_arrival)
eq_mag_list.append(eq_mag)
eq_distance_list.append(eq_distance)
eq_depth_list.append(eq_depth)
L1_binary_file = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/LLO_O1_binary_{0}{1}_4.txt'.format(rms_toggle, direction), 'w')
for eq_time, pw_arrival,peak_ground_velocity,eq_mag,eq_distance,eq_depth, lockloss in zip(eq_time_list, pw_arrival_list,H1_peak_ground_velocity_list,eq_mag_list,eq_distance_list,eq_depth_list, locklosslist):
if lockloss == "Y":
lockloss_binary = '1'
L1_binary_file.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss_binary))
elif lockloss == "N":
lockloss_binary = '0'
L1_binary_file.write('{0:^20} {1:^20} {2:^20} {3:^20} {4:^20} {5:^20} {6:^20} \n'.format(eq_time,pw_arrival,peak_ground_velocity,eq_mag,eq_distance,eq_depth,lockloss_binary))
else:
pass
L1_binary_file.close()
locklosslistZ = []
locklosslistY = []
locklosslistN = []
eq_time_list_Z = []
eq_time_list_N = []
eq_time_list_Y = []
H1_peak_ground_velocity_list_Z = []
H1_peak_ground_velocity_list_N = []
H1_peak_ground_velocity_list_Y = []
peak_ground_acceleration_list_Z = []
peak_ground_acceleration_list_N = []
peak_ground_acceleration_list_Y = []
H1_peak_ground_velocity_sorted_list, locklosssortedlist, predicted_peak_ground_velocity_sorted_list = (list(t) for t in zip(*sorted(zip(H1_peak_ground_velocity_list, locklosslist, predicted_peak_ground_velocity_list))))
num_lock_list = []
YN_peak_list = []
for sortedpeak, sortedlockloss in zip(H1_peak_ground_velocity_sorted_list, locklosssortedlist):
if sortedlockloss == "Y":
YN_peak_list.append(sortedpeak)
num_lock_list.append(1)
elif sortedlockloss == "N":
YN_peak_list.append(sortedpeak)
num_lock_list.append(0)
num_lock_prob_cumsum = np.cumsum(num_lock_list) / np.cumsum(np.ones(len(num_lock_list)))
plt.figure(8)
for t,time,peak,lockloss in zip(range(len(eq_time_list)),eq_time_list,H1_peak_ground_velocity_list,locklosslist):
if lockloss == "Z":
eq_time_list_Z.append(t)
H1_peak_ground_velocity_list_Z.append(peak)
locklosslistZ.append(lockloss)
elif lockloss == "N":
eq_time_list_N.append(t)
H1_peak_ground_velocity_list_N.append(peak)
locklosslistN.append(lockloss)
elif lockloss == "Y":
eq_time_list_Y.append(t)
H1_peak_ground_velocity_list_Y.append(peak)
locklosslistY.append(lockloss)
plt.plot(eq_time_list_N, H1_peak_ground_velocity_list_N, 'go', label='locked at earthquake(eq)')
plt.plot(eq_time_list_Y, H1_peak_ground_velocity_list_Y, 'ro', label='lockloss at earthquake(eq)')
plt.title('L1 Lockstatus Plot')
plt.yscale('log')
plt.xlabel('earthquake count(eq)')
plt.ylabel('peak ground velocity(m/s)')
plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LLO/lockstatus_LLO_{0}{1}.png'.format(rms_toggle, direction))
plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockstatus_LLO_{0}{1}_4.pdf'.format(rms_toggle, direction))
plt.clf()
#plt.figure(23)
#plt.plot(eq_time_list_N, peak_ground_acceleration_list_N, 'go', label='locked at earthquake(eq)')
#plt.plot(eq_time_list_Y, peak_ground_acceleration_list_Y, 'ro', label='lockloss at earthquake(eq)')
#plt.title('H1 Lockstatus Plot(acceleration)')
#plt.yscale('log')
#plt.xlabel('earthquake count(eq)')
#plt.ylabel('peak ground acceleration(m/s)')
#plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockstatus_acceleration_LHO_{0}{1}.png'.format(rms_toggle, direction))
#plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockstatus_acceleration_LHO_{0}{1}.png'.format(rms_toggle, direction))
#plt.clf()
#plt.figure(9)
#plt.plot(H1_peak_ground_velocity_list, predicted_peak_ground_velocity_list, 'o', label='actual vs predicted')
#plt.title('L1 actual vs predicted ground velocity')
#plt.xscale('log')
#plt.yscale('log')
#plt.xlabel('peak ground velocity(m/s)')
#plt.ylabel('predicted peak ground velocity(m/s)')
#plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LLO/check_predictionLLO_{0}{1}.png'.format(rms_toggle, direction))
#plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/check_predictionLLO_{0}{1}.png'.format(rms_toggle, direction))
#plt.clf()
threshold_file_L1 = open('/home/eric.coughlin/gitrepo/seismon/RfPrediction/data/threshhold_data_{0}{1}_4.txt'.format(rms_toggle, direction), 'w')
num_of_lockloss = len(locklosslistY)
total_lockstatus = num_of_lockloss + len(locklosslistN)
total_lockstatus_all = num_of_lockloss + len(locklosslistN) + len(locklosslistZ)
total_percent_lockloss = num_of_lockloss / total_lockstatus
threshold_file_L1.write('The percentage of total locklosses is {0}% \n'.format(total_percent_lockloss * 100))
threshold_file_L1.write('The total number of earthquakes is {0}. \n'.format(total_lockstatus_all))
eqcount_50 = 0
eqcount_75 = 0
eqcount_90 = 0
eqcount_95 = 0
for item, thing in zip(num_lock_prob_cumsum, YN_peak_list):
if item >= .5:
eqcount_50 = eqcount_50 + 1
if item >= .75:
eqcount_75 = eqcount_75 + 1
if item >= .9:
eqcount_90 = eqcount_90 + 1
if item >= .95:
eqcount_95 = eqcount_95 + 1
threshold_file_L1.write('The number of earthquakes above 50 percent is {0}. \n'.format(eqcount_50))
threshold_file_L1.write('The number of earthquakes above 75 percent is {0}. \n'.format(eqcount_75))
threshold_file_L1.write('The number of earthquakes above 90 percent is {0}. \n'.format(eqcount_90))
threshold_file_L1.write('The number of earthquakes above 95 percent is {0}. \n'.format(eqcount_95))
probs = [0.5, 0.75, 0.9, 0.95]
num_lock_prob_cumsum_sort = np.unique(num_lock_prob_cumsum)
YN_peak_list_sort = np.unique(YN_peak_list)
num_lock_prob_cumsum_sort, YN_peak_list_sort = zip(*sorted(zip(num_lock_prob_cumsum_sort, YN_peak_list_sort)))
thresholds = []
thresholdsf = interp1d(num_lock_prob_cumsum_sort,YN_peak_list_sort,bounds_error=False)
for item in probs:
threshold = thresholdsf(item)
        threshold_file_L1.write('The threshold at {0}% is {1}(m/s) \n'.format(item * 100, threshold))
threshold_file_L1.write('The number of times of locklosses is {0}. \n'.format(len(locklosslistY)))
threshold_file_L1.write('The number of times of no locklosses is {0}. \n'.format(len(locklosslistN)))
threshold_file_L1.write('The number of times of not locked is {0}. \n'.format(len(locklosslistZ)))
threshold_file_L1.close()
plt.figure(10)
plt.plot(YN_peak_list_sort, num_lock_prob_cumsum_sort, 'kx', label='probability of lockloss')
plt.title('L1 Lockloss Probability')
plt.xscale('log')
plt.grid(True)
plt.xlabel('peak ground velocity (m/s)')
    plt.ylabel('Lockloss Probability')
plt.legend(loc='best')
#plt.savefig('/home/eric.coughlin/public_html/lockloss_threshold_plots/LLO/lockloss_probablity_LLO_{0}{1}.png'.format(rms_toggle, direction))
plt.savefig('/home/eric.coughlin/gitrepo/seismon/RfPrediction/plots/lockloss_probablity_LLO_{0}{1}_4.pdf'.format(rms_toggle, direction))
plt.clf()
| gpl-3.0 |
thomasyu888/Genie | tests/test_seg.py | 1 | 3012 | import mock
import pytest
import pandas as pd
import synapseclient
from genie.seg import seg
from genie.cbs import cbs
syn = mock.create_autospec(synapseclient.Synapse)
segClass = seg(syn, "SAGE")
cbsClass = cbs(syn, "SAGE")
def test_processing():
expectedSegDf = pd.DataFrame({
"ID": ['GENIE-SAGE-ID1-1', 'GENIE-SAGE-ID2-1', 'GENIE-SAGE-ID3-1',
'GENIE-SAGE-ID4-1', 'GENIE-SAGE-ID5-1'],
"CHROM": ['1', '2', '3', '4', '5'],
"LOCSTART": [1, 2, 3, 4, 3],
"LOCEND": [1, 2, 3, 4, 2],
"NUMMARK": [1, 2, 3, 4, 3],
"SEGMEAN": [1, 2, 3.9, 4, 3],
"CENTER": ["SAGE", "SAGE", "SAGE", "SAGE", "SAGE"]})
segDf = pd.DataFrame({
"ID": ['ID1-1', 'ID2-1', 'ID3-1', 'ID4-1', 'ID5-1'],
"CHROM": ['chr1', 2, 3, 4, 5],
"LOC.START": [1, 2, 3, 4, 3],
"LOC.END": [1, 2, 3, 4, 2],
"NUM.MARK": [1, 2, 3, 4, 3],
"SEG.MEAN": [1, 2, 3.9, 4, 3]})
newSegDf = segClass._process(segDf)
assert expectedSegDf.equals(newSegDf[expectedSegDf.columns])
newSegDf = cbsClass._process(segDf)
assert expectedSegDf.equals(newSegDf[expectedSegDf.columns])
def test_validation():
with pytest.raises(AssertionError):
segClass.validateFilename(["foo"])
assert segClass.validateFilename(["genie_data_cna_hg19_SAGE.seg"]) == "seg"
assert cbsClass.validateFilename(["genie_data_cna_hg19_SAGE.cbs"]) == "cbs"
segDf = pd.DataFrame({
"ID": ['ID1', 'ID2', 'ID3', 'ID4', 'ID5'],
"CHROM": [1, 2, 3, 4, 5],
"LOC.START": [1, 2, 3, 4, 3],
"LOC.END": [1, 2, 3, 4, 3],
"NUM.MARK": [1, 2, 3, 4, 3],
"SEG.MEAN": [1, 2, 3, 4, 3]})
error, warning = segClass._validate(segDf)
assert error == ""
assert warning == ""
segDf = pd.DataFrame({
"ID": ['ID1', 'ID2', 'ID3', 'ID4', 'ID5'],
"CHROM": [1, 2, float('nan'), 4, 5],
"LOC.START": [1, 2, 3, 4, float('nan')],
"LOC.END": [1, 2, 3, float('nan'), 3],
"NUM.MARK": [1, 2, 3, 4, 3]})
expectedErrors = (
"Your seg file is missing these headers: SEG.MEAN.\n"
"Seg: No null or empty values allowed in column(s): "
"CHROM, LOC.END, LOC.START.\n")
error, warning = segClass._validate(segDf)
assert error == expectedErrors
assert warning == ""
error, warning = cbsClass._validate(segDf)
assert error == expectedErrors
assert warning == ""
segDf = pd.DataFrame({
"ID": ['ID1', 'ID2', 'ID3', 'ID4', 'ID5'],
"CHROM": [1, 2, 3, 4, 5],
"LOC.START": [1, 2, 3, 4.3, 3],
"LOC.END": [1, 2, 3.4, 4, 3],
"NUM.MARK": [1, 2, 3, 33.3, 3],
"SEG.MEAN": [1, 2, 'f.d', 4, 3]})
error, warning = segClass._validate(segDf)
expectedErrors = (
"Seg: Only integars allowed in these column(s): "
"LOC.END, LOC.START, NUM.MARK.\n"
"Seg: Only numerical values allowed in SEG.MEAN.\n")
assert error == expectedErrors
assert warning == ""
| mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/linear_model/tests/test_huber.py | 54 | 7619 | # Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
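# Minimal usage sketch (illustration only, not one of the test cases, and not
# collected by pytest since the name has no test_ prefix): on the contaminated data
# built above HuberRegressor down-weights the noisy 10% of samples, which is the
# behaviour the tests below assert in detail.
def _example_huber_vs_ridge():
    X, y = make_regression_with_outliers()
    huber = HuberRegressor(fit_intercept=True, alpha=0.01).fit(X, y)
    ridge = Ridge(fit_intercept=True, alpha=0.01).fit(X, y)
    # huber.outliers_ flags samples with |residual| > epsilon * scale_
    return huber.outliers_.sum(), huber.coef_, ridge.coef_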
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make sure
# that the number of decimal places used is somewhat insensitive to the
# amplitude of the coefficients and therefore to the scale of the data
# and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)),
np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True)
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale,
huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert_false(np.all(n_outliers_mask_1))
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=10000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
if huber_warm.n_iter_ is not None:
assert_equal(0, huber_warm.n_iter_)
def test_huber_better_r2_score():
    # Test that huber returns a better r2 score than Ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
| mit |
MicheleMaris/grasp_lib | grasp_lib.py | 1 | 64897 | __DESCRIPTION__="""
grasp_lib.py V 0.6 - 3 Feb 2012 - 23 Mar 2012 (0.4) - 2012 Nov 27 (0.5) - 2013 Dec 12 -
M.Maris, M.Sandri, F.Villa
From a set of routines created by M.Sandri and F.Villa
This library allows to import a grasp file in GRASP format and to convert it in an healpix map, it also
performs interpolation of the grasp map
In GRASP convention theta and phi are colatitude and longitude, with
phi meridian circle longitude, [0,180]deg
theta on meridian circle polar distance, [-180,180] deg
a point with theta < 0 denotes a point located at the same polar distance abs(theta) but with longitude phi+180 deg
The usual polar convention is
phi meridian halfcircle longitude, [-180,180]deg
theta on meridian circle polar distance, [0,180] deg
"""
def thetaUVphiUV2UV(thetaUV,phiUV,deg=True) :
"""converts thetaUV and phiUV into U=x0=sin(thetaUV)*cos(phiUV), V=y0=sin(thetaUV)*sin(phiUV)"""
from numpy import pi, sin, cos
    if deg : return sin(pi/180.*thetaUV)*cos(pi/180.*phiUV),sin(pi/180.*thetaUV)*sin(pi/180.*phiUV)
return sin(thetaUV)*cos(phiUV),sin(thetaUV)*sin(phiUV)
def UV2thetaUVphiUV(U,V,deg=True) :
from numpy import pi, arctan2, arccos,arcsin,sin,cos,array,mod
f=(180./pi) if deg else 1.
phiUV=arctan2(V,U)
A=cos(phiUV)
B=sin(phiUV)
thetaUV=arcsin((A*U+B*V)/(A*A+B*B))
if deg : return f*thetaUV,mod(f*phiUV,360.)
return f*thetaUV,mod(phiUV,2.*pi)
def phitheta2longcolat(phi_grasp,theta_grasp) :
""" converts GRASP (phi,theta) coordinates into standard (long,colat)
upon request returns a structure """
import numpy as np
_long = phi_grasp*1.
colat=theta_grasp*1.
idx = np.where(colat < 0)[0]
if len(idx) > 0 :
_long[idx]=_long[idx]+180.
colat[idx]=np.abs(colat[idx])
return _long,colat
def longcolat2phitheta(_long,colat) :
"""converts ususal polar (long,colat) coordinates into GRASP (phi,theta) returns a structure"""
import numpy as np
phi=_long*1.
theta=colat*1.
idx = np.where(phi >= 180.)[0]
if len(idx) > 0 :
phi[idx]=phi[idx]-180.
theta[idx]=-theta[idx]
return phi,theta
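# Minimal sketch (illustration only, not used elsewhere in the module): the two
# helpers above implement the convention change described in the module docstring.
# A GRASP point with negative theta lies on the opposite half of the meridian,
# e.g. (phi=30, theta=-10) is the same direction as (long=210, colat=10).
def _example_grasp_polar_roundtrip() :
    import numpy as np
    phi=np.array([30.,30.])
    theta=np.array([10.,-10.])
    lon,colat=phitheta2longcolat(phi,theta)    # -> lon=[30,210], colat=[10,10]
    phi2,theta2=longcolat2phitheta(lon,colat)  # -> back to [30,30] and [10,-10]
    return lon,colat,phi2,theta2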
def longcolat2rowcol(_long,colat,phi0,dphi,theta0,dtheta) :
""" converts (long,colat) into index of phi and of theta in the matrix """
import numpy as np
phi,theta=longcolat2phitheta(_long,colat)
    return (phi-phi0)/dphi,(theta-theta0)/dtheta
def ipix2rowcol(nside,ipix,phi0,dphi,theta0,dtheta,nest=False) :
""" converts an healpix ipix (ring) into index of phi and of theta in the matrix"""
    from healpy import pix2ang
    import numpy as np
    colat,_long=pix2ang(nside,ipix,nest=nest)
    return longcolat2rowcol(_long/np.pi*180.,colat/np.pi*180.,phi0,dphi,theta0,dtheta)
def nside2ipix(nside,Reversed=False) :
""" converts nside into a list of pixels (ring)
reversed = True means the orderring is reversed
"""
import numpy as np
if not Reversed : return np.arange(12*int(nside)*int(nside))
return np.arange(12*int(nside)*int(nside)-1,-1,-1)
from grid2d import *
#class GraspMapsCube :
#"a list of grasp maps is used to integrate over a grasp map in band"
#def _class_line_cube :
#def __init__(self,listNames) :
#self.Name=[]
#for k in range(len(listNames)) :
#try :
#self.plane(open(listNames[k],'r').readlines())
#self.Name.append(listNames)
#except :
#print k," impossible to read, skipped"
#self.N=len(self.Name)
#def __len__(self) :
#return self.N
#def __getitem__(self,i) :
#import numpy as np
#l=[]
#for k in range(len(self)) :
#l.append(self.plane[k][i]))
#return l
#def __init__(self,listNames) :
#import numpy as np
#self._line=-1
#self._cube=_class_line_cube(listNames)
#def __len__(self) :
#return len(self._cube)
#def _fetch_line(self) :
#self._line=+1
#return self._cube[self._line]
def components2cocross(r1,i1,r2,i2) :
"given r1,i1,r2,i2 return Eco,Ecross"
re=[r1,r2]
im=[i1,i2]
p=[]
p.append(r1**2+i1**2)
p.append(r2**2+i2**2)
if p[0].max() > p[1].max() :
ico=0
icross=1
else :
ico=1
icross=0
    Eco=re[ico]+complex(0,1.)*im[ico]
    Ecross=re[icross]+complex(0,1.)*im[icross]
return Eco,Ecross
def cocross2rhclhc(Eco,Ecross) :
"given Eco,Ecross return Erhc,Elhc"
isqrt2=2.**(-0.5)
Erhc=(Eco-complex(0,1.)*Ecross)*isqrt2
Elhc=(Eco+complex(0,1.)*Ecross)*isqrt2
return Erhc,Elhc
def components2rhclhc(r1,i1,r2,i2) :
"given r1,i1,r2,i2 return Erhc,Elhc"
Eco,Ecross=components2cocross(r1,i1,r2,i2)
return cocross2rhclhc(Eco,Ecross)
def polarization_ellipse_from_fields(Erhc,Elhc) :
"given Erhc,Elhc returns rmajor, rminor, directivity, psi_pol_rad"
import numpy as np
isqrt2=2.**(-0.5)
rmajor=abs(abs(Erhc)+abs(Elhc))*isqrt2
rminor=abs(abs(Erhc)-abs(Elhc))*isqrt2
directivity=abs(Erhc)**2+abs(Elhc)**2
aa=(Erhc/Elhc)**0.5
psi_pol=np.arctan2(aa.imag,aa.real)
return rmajor,rminor,directivity,psi_pol
def components2polarization_ellipse(r1,i1,r2,i2) :
"given r1,i1,r2,i2 return rmajor, rminor, directivity, psi_pol_rad"
Erhc,Elhc=components2rhclhc(r1,i1,r2,i2)
return polarization_ellipse_from_fields(Erhc,Elhc)
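# Minimal sketch (illustration only; it assumes the re + 1j*im construction used in
# components2cocross above): with component 1 purely real and component 2 purely
# imaginary the field is circularly polarized, so the two ellipse axes come out equal.
def _example_polarization_ellipse() :
    import numpy as np
    r1=np.array([1.0]); i1=np.array([0.0])
    r2=np.array([0.0]); i2=np.array([1.0])
    rmajor,rminor,directivity,psi_pol=components2polarization_ellipse(r1,i1,r2,i2)
    return rmajor,rminor,directivity,psi_pol   # rmajor ~ rminor for circular polarization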
class GraspMap(MapGrid) :
def __init__(self,inputfile,skiplines,CounterPhi=[1e6,1.],silent=False,useCounterPhi=False,closeColumn=False,Pickle=False,periodicColumn=False,badConversionValue=0.) :
"""badConversionValue = Value to replace samples with problems in converting strings to numbers"""
MapGrid.__init__(self)
self._init_failed = True
if Pickle :
self.load(inputfile)
self._init_failed = False
return
self.info['graps_file']=inputfile.strip()
self.info['projection']='GRASP-CUT'
self.info['ReferenceDocument']="LFI BEAMS DELIVERY: FORMAT SPECIFICATIONS\nM. Sandri\nPL-LFI-PST-TN-044, 1.0,July 2003"
self.info['numbad']=-1
if inputfile.strip() == '' :
return
self.get_cuts(inputfile,skiplines,badConversionValue=badConversionValue)#,CounterPhi=CounterPhi,silent=silent,useCounterPhi=useCounterPhi)
if periodicColumn :
self.set_col_periodic()
if closeColumn :
print 'Closing rows at right'
for k in self.M.keys() :
if k!='_row_values' and k!='_col_values' and k!='_row_index' and k!='_col_index' :
for r in range(self.R['n']) :
self.M[k][r][-1]=self.M[k][self.R['n']-1-r][0]
#if closeColumn : self.right_close_col()
def get_cuts(self,inputfile,skiplines,CounterPhi=[1e6,1.],silent=False,useCounterPhi=False,badConversionValue=0.) :
""" get_cuts
        this method reads the cuts file
inputfile = name of the input file
the output is a structure with an entry for each cut in phi
COUNTER_PHI v.z. HEADER_PHI
By default the field PHI in each cut is the one declared in the
header of the block of the input grasp file.
The value is also returned as HEADER_PHI in the structure
There are some cases in which the header is bad formatted and
the PHI is not reliable.
To solve this problem GET_CUTS provides an internal PHI calculator
assuming PHI increments on constant steps, the value from
the calculator is in COUNTER_PHI
The counter is tuned by using the keyword
CounterPhi = [phi0,step] (default [1d6,1d0])
so the default is simply a counter of the number of cuts.
The value 1e6. as first value is to assure the COUNTER_PHI is not
confused with an angle.
To make COUNTER_PHI as PHI it is sufficient to add the keyword
/useCounterPhi
so that the PHI will be forced to be COUNTER_PHI instead of
HEADER_PHI.
        At last the tag PHI_TYPE in the structure specifies whether PHI
comes from the HEADER or from the COUNTER
In self.info['numbad'] is the number of lines which can not be properly decoded into float,
they are marked bad (1) in the flag_bad map.
badConversionValue = Value to replace samples with problems in converting strings to numbers
"""
import sys
import numpy as np
import copy
CounterPhi0=CounterPhi[0]*1.
CounterDeltaPhi=CounterPhi[1]*1.
deltaphi=1.
phi0=0.
header='header'
fileinput='fileinput'
thetai=0.
dtheta=0.
ntheta=1
phi=0.
k1=1
k2=1
k3=1
comp1r=0.
comp1i=0.
comp2r=0.
comp2i=0.
#
#********************************
#
self.clean()
self.info['hdr']={'file':inputfile}
self.info['wrong_lines']=[]
print "Reading ",inputfile
try :
h=open(inputfile,'r').readlines()
self.mapname=inputfile
self.info['inputfile']=inputfile
except :
print "File %s not found"%inputfile
return
# removes the new line
for i in range(len(h)) :
h[i] = h[i].split('\n')[0].split('\r')[0]
# skips a given number of lines
self.info['hdr']['skipped']=[]
self.info['hdr']['skiplines']=skiplines*1
if skiplines > 0 :
for line in h[0:(skiplines-1)] :
self.info['hdr']['skipped'].append(line.split('\n')[0])
h=h[skiplines:]
# skips all the lines until it reaches an header
notHeader = True
icount=-1
while notHeader :
icount+=1
ll=h[icount].split('\n')[0].strip().split()
            try :
                lla = np.array(ll,dtype=float)
                notHeader = False
            except :
                notHeader = True
            if not notHeader :
                if len(ll) != 7 :
                    notHeader = True
if icount > 0 :
for k in h[0:icount] :
self.info['hdr']['skipped'].append(k)
self.info['hdr']['skiplines']+=1
h=h[icount:]
# the second line of the first block gives the number of lines per block
currentline=1
ll=h[currentline].split('\n')[0].strip().split()
try :
thetai = float(ll[0])
dtheta = float(ll[1])
ntheta = int(ll[2])
header_phi = float(ll[3])
k1 = int(ll[4])
k2 = int(ll[5])
k3 = int(ll[6])
except :
return h[currentline-1],'level 1',currentline,h
self.info['nlines']=len(h)
self.info['blocksize']=ntheta+2
self.info['nblocks'] = len(h)/(ntheta+2)
nblocks = self.info['nblocks']
self.info['thetai']=np.zeros(nblocks)
self.info['dtheta']=np.zeros(nblocks)
self.info['ntheta']=np.zeros(nblocks,dtype='int')
self.info['phi']=np.zeros(nblocks)
self.info['k1']=np.zeros(nblocks,dtype='int')
self.info['k2']=np.zeros(nblocks,dtype='int')
self.info['k3']=np.zeros(nblocks,dtype='int')
        self.info['line']=np.zeros(nblocks,dtype=object)
self.info['fail']=np.zeros(nblocks,dtype='int')+1
self.info['iline']=np.zeros(nblocks,dtype='int')
if (ntheta+2)*self.info['nblocks']-len(h) != 0 :
print "Error: too much or too few lines to form the required number of blocks"
print "Nblocks : ",self.info['nblocks']
print "lines : ",len(h)
print "lines per block : ",ntheta+2
print "lines in blocks : ",(ntheta+2)*nblocks
print "residual lines : ",(ntheta+2)*nblocks-len(h)
return None
print self.info['nblocks']," x ",ntheta," elements"
# decomposes all the headers
for i_block in range(nblocks) :
ii = i_block*self.info['blocksize']
self.info['iline'][i_block]=ii*1
self.info['line'][i_block]=h[ii]+''
ll=h[ii+1].split('\n')[0].strip().split()
try :
self.info['thetai'][i_block] = float(ll[0])
self.info['dtheta'][i_block] = float(ll[1])
self.info['ntheta'][i_block] = int(ll[2])
self.info['phi'][i_block] = float(ll[3])
self.info['k1'][i_block] = int(ll[4])
self.info['k2'][i_block] = int(ll[5])
self.info['k3'][i_block] = int(ll[6])
self.info['fail'][i_block] = 0
except :
print "Fail to decode block %d, line %d\n'%s'\n"%(i_block,ii,h[ii+1])
if self.info['fail'].sum() > 0 :
print "fail to decode blocks"
return
# sets the phi along the x axis of the grid i.e. the columns
self.set_col_scale('phi','deg',self.info['phi'])
self.dphi=self.C['delta']
self.phi0=self.C['min']
# sets the theta along the y axis of the grid i.e. the rows
self.set_row_scale('theta','deg',np.arange(self.info['ntheta'][0])*self.info['dtheta'][0]+self.info['thetai'][0])
self.dtheta=self.R['delta']
self.theta0=self.R['min']
#initialize private compoenents used for debug
self.newmap('_line_index',dtype='int')
# initializes the five component matrices
self.newmap('r1')
self.newmap('i1')
self.newmap('r2')
self.newmap('i2')
self.newmap('power')
self.newmap('flag_bad',dtype='int')
#
# fill the component matrices
# each block is for a given phi, i.e. a given Column
# each value is for a given theta, i,e, a given Row
self.newmap('phi')
self.newmap('theta')
self.info['numbad']=0
for i_block in range(nblocks) :
ii = i_block*self.info['blocksize']+2
for i_raw in range(self.R['n']) :
iline = ii+i_raw
self.M['phi'][i_raw,i_block]=self.C['v'][i_block]*1
self.M['theta'][i_raw,i_block]=self.R['v'][i_raw]*1
self.M['_row_values'][i_raw,i_block]=self.R['v'][i_raw]*1
self.M['_col_values'][i_raw,i_block]=self.C['v'][i_block]*1
self.M['_row_index'][i_raw,i_block]=i_raw*1
self.M['_col_index'][i_raw,i_block]=i_block*1
self.M['_line_index'][i_raw,i_block]=iline*1
ll=h[iline].split('\n')[0].strip().split()
lla=np.zeros(4)
nbad=0
for icol in range(4) :
try :
lla[icol]=np.array(ll[icol],dtype=float)
except :
lla[icol]=badConversionValue
nbad+=1
if nbad == 0 :
self.M['flag_bad'][i_raw,i_block]=0
else :
self.info['wrong_lines'].append("skiplines %d, line %d, row %d, block %d\n%s"%(self.info['hdr']['skiplines'],iline,i_raw,i_block,h[iline]))
print "Invalid litteral in file, skiplines %d, line %d, row %d, block %d\n"%(self.info['hdr']['skiplines'],iline,i_raw,i_block)
print ">"+" ".join(ll)+"< out >",lla,'<'
self.M['flag_bad'][i_raw,i_block]=1
self.info['numbad']+=1
self.M['r1'][i_raw,i_block]=lla[0]*1
self.M['i1'][i_raw,i_block]=lla[1]*1
self.M['r2'][i_raw,i_block]=lla[2]*1
self.M['i2'][i_raw,i_block]=lla[3]*1
self.M['power']=self.M['r1']**2+self.M['i1']**2+self.M['r2']**2+self.M['i2']**2
# stores longitudes and latitudes
self.newmap('long')
self.newmap('colat')
self.M['long']=self.M['_row_values']*0
self.M['colat']=self.M['_row_values']*0
self.M['long'],self.M['colat']=phitheta2longcolat(self.M['phi'],self.M['theta'])
self._init_failed = False
def initFailed(self) :
return self._init_failed
def haveBadSamples(self) :
if self.initFailed(): return False
return self.info['numbad']>0
def formatGrasp(self) : return {'float':' %17.10e','int':' %4d'}
def recompose_header(self) :
import copy
hdr=[]
if self.info['hdr']['skiplines']>0 :
hdr=copy.deepcopy(self.info['hdr']['skipped'])
return hdr
def recompose_block_header(self,i_block) :
import numpy as np
fmtF=self.formatGrasp()['float']
fmtI=self.formatGrasp()['int']
if i_block < 0 : return
if i_block >= self.info['nblocks'] : return
ll=''
ll+=fmtF%self.info['thetai'][i_block]
ll+=fmtF%self.info['dtheta'][i_block]
ll+=fmtI%self.info['ntheta'][i_block]
ll+=fmtF%self.info['phi'][i_block]
ll+=fmtI%self.info['k1'][i_block]
ll+=fmtI%self.info['k2'][i_block]
ll+=fmtI%self.info['k3'][i_block]
return ['Planck,',ll.upper()]
def recompose_block_data(self,i_block,tab0=None) :
import numpy as np
import copy
fmtF=self.formatGrasp()['float']
fmtI=self.formatGrasp()['int']
if i_block < 0 : return
if i_block >= self.info['nblocks'] : return
tab=[]
if type(tab0)==type([]) : tab=copy.deepcopy(tab0)
if type(tab0)==type('') : tab=[tab0]
for i_raw in range(self.R['n']) :
ll=''
ll+=fmtF%self.M['r1'][i_raw,i_block]
ll+=fmtF%self.M['i1'][i_raw,i_block]
ll+=fmtF%self.M['r2'][i_raw,i_block]
ll+=fmtF%self.M['i2'][i_raw,i_block]
tab.append(ll.upper())
return tab
def recompose_block(self,i_block,tab0=None,fmt='%18.10e') :
if i_block < 0 : return
if i_block >= self.info['nblocks'] : return
tab=self.recompose_block_header(i_block)
return self.recompose_block_data(i_block,tab0=tab)
def recompose_map(self,tab0=None) :
tab=[]
for i_block in range(self.info['nblocks']) :
for l in self.recompose_block(i_block) :
tab.append(l)
return tab
def FourColumnsPower(self,power1Name='p1',power2Name='p2',powerName='power') :
"a FourColumns map has r1=sqrt(p1), i1=0, r2=sqrt(p2), i2=0"
new=self.copy()
new.info['ktype']=1
        if self.M.has_key(power1Name) and self.M.has_key(power2Name) :
new.info['ncomp']=2
new.M['r1']=self[power1Name]**0.5
new.M['r2']=self[power2Name]**0.5
new.M['i1']=self[power1Name]*0
new.M['i2']=self[power1Name]*0
        elif self.M.has_key(powerName) :
            new.info['ncomp']=1
            new.M['r1']=self[powerName]**0.5
            new.M['r2']=self[powerName]*0
            new.M['i1']=self[powerName]*0
            new.M['i2']=self[powerName]*0
else :
print "the map shall contain ",power1Name,power2Name," or ",powerName
return
return new
def grasp2longcolat(self,phi_grasp,theta_grasp) :
""" converts GRASP (phi,theta) coordinates into standard (long,colat)
upon request returns a structure """
import numpy as np
_long = phi_grasp*1.
colat=theta_grasp*1.
idx = np.where(colat < 0)[0]
if len(idx) > 0 :
_long[idx]=_long[idx]+180.
colat[idx]=np.abs(colat[idx])
return _long,colat
def longcolat2grasp(self,_long,colat) :
"""converts ususal polar (long,colat) coordinates into GRASP (phi,theta) returns a structure"""
import numpy as np
phi=_long*1.
theta=colat*1.
idx = np.where(phi >= 180.)[0]
if len(idx) > 0 :
phi[idx]=phi[idx]-180.
theta[idx]=-theta[idx]
return phi,theta
def longcolat2rowcol(self,_long,colat) :
""" converts (long,colat) into index of phi and of theta in the matrix """
import numpy as np
phi,theta=self.longcolat2grasp(_long,colat)
return (theta-self.theta0)/self.dtheta,(phi-self.phi0)/self.dphi
def ipix2longcolat(self,nside,ipix,nest=False,deg=True) :
""" converts an healpix ipix (ring) into index of phi and of theta in the matrix"""
from healpy import pix2ang
import numpy as np
colat,_long=pix2ang(nside,ipix,nest=nest)
if deg : return _long*180./np.pi,colat*180./np.pi
return _long,colat
def ipix2rowcol(self,nside,ipix,nest=False,deg=False) :
""" converts an healpix ipix (ring) into index of phi and of theta in the matrix"""
from healpy import pix2ang
import numpy as np
colat,_long=pix2ang(nside,ipix,nest=nest)
if deg : return self.longcolat2rowcol(_long,colat)
return self.longcolat2rowcol(_long/np.pi*180.,colat/np.pi*180.)
def nside2ipix(self,nside,Reversed=False) :
""" converts nside into a list of pixels (ring)
reversed = True means the orderring is reversed
"""
return nside2ipix(nside,Reversed=Reversed)
def parseColatRange(self,colatrange) :
prs=(colatrange.strip()).split(',')
left = [prs[0][0],float(prs[0][1:])]
right = [prs[1][-1],float(prs[1][0:-1])]
return left,right
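    # Example (illustration only): parseColatRange(']0,30]') returns
    #     left  = [']', 0.0]    open lower bound,   colat >  0 deg
    #     right = [']', 30.0]   closed upper bound, colat <= 30 deg
    # healpix() below uses the bracket characters to choose strict or non-strict cuts
    # on the colatitude when only a ring of the sphere is requested.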
def healpix(self,nside,mapname='power',nest=False,Reversed=False,rot=[0.,0.],colatrange=None) :
"""converts to healpix or a stack of healpix maps of given nside
colatrange=None , takes all the map
colatrange=']a,b['
colatrange='[a,b['
colatrange=']a,b]'
for gridmap
"""
import numpy as np
import healpy as H
if colatrange==None :
ipix=self.nside2ipix(nside,Reversed=Reversed)
if rot[0]==0. and rot[1]==0 :
_long,colat = self.ipix2longcolat(nside,ipix)
phi,theta=self.longcolat2grasp(_long,colat)
r1=self.bilinearXY(mapname,phi,theta)
return r1
else :
fact=180./np.pi
prs=(colatrange.strip()).split(',')
left = [prs[0][0],float(prs[0][1:])]
right = [prs[1][-1],float(prs[1][0:-1])]
NPEQ=12*nside/2
print left,right
ipixmin=H.ang2pix(nside,left[1]/fact,0)-NPEQ
ipixmax=H.ang2pix(nside,right[1]/fact,0)+NPEQ
if ipixmin < 0 : ipixmin=0
if ipixmax > 12*nside**2-1 : ipixmax=12*nside**2
ipix = np.arange(ipixmin,ipixmax)
colat,Long = H.pix2ang(nside,ipix)
fl=np.ones(len(colat))
            if left[0] == ']' :
                fl*=(left[1]/fact)<colat
            else :
                fl*=(left[1]/fact)<=colat
            if right[0] == '[' :
                fl*=colat<(right[1]/fact)
            else :
                fl*=colat<=(right[1]/fact)
idx=np.where(fl)[0]
ipix=ipix[idx]
colat=colat[idx]*fact
Long=Long[idx]*fact
fl=None
idx=None
phi,theta=self.longcolat2grasp(Long,colat)
r1=self.bilinearXY(mapname,phi,theta)
return r1,ipix
def polarplot(self,mapname,long_step=2,colat_step=10,log=None,cm=None,grayBad="#707070",adAxes=True,area=[12.,1.],cmap='hsv',vmin=-30,vmax=-0.1) :
import numpy as np
from matplotlib import colors as Colors
import pylab
from matplotlib import pyplot as plt
from matplotlib import cm
        try :
            _cm=cm.__dict__[cmap]
        except :
            print "required cmap ",cmap," not found, replaced with 'hsv'"
            print "allowed values "
            print cm.__dict__.keys()
            _cm=cm.hsv
if adAxes : ax = plt.subplot(111, polar=True)
y = self.M[mapname]*1
if log == 'e' or log == 'ln' :
y=np.log(y)
elif log == '10' or log == 'log10' :
y=np.log(y)/np.log(10)
elif log == '2' :
y=np.log(y)/np.log(2)
else :
try :
b=np.float(log)
except :
b=None
if b!= None :
y=np.log(y)/np.log(b)
shape2=y.shape
shape1=shape2[0]*shape2[1]
idxLong=np.arange(0,shape2[1],colat_step)
idxColat=np.arange(0,shape2[0],long_step)
for i in idxColat :
tt=np.pi/180*self['long'][i][idxLong]
cc=self['colat'][i][idxLong]
aa=(area[0]-area[1])*(1-np.cos(np.pi/180.*cc))/2.+area[1]
print i,cc.min(),cc.max(),aa.min(),aa.max()
try :
c=plt.scatter(tt,cc, c=y[i][idxLong], s=aa, cmap=_cm,edgecolor='none',vmin=vmin,vmax=vmax)
except :
pass
plt.axis([0,360,0,180])
plt.title(self.mapname)
def mercatore(self,reversed=False) :
"""converts a CUT map to a MERCATOR MAP"""
import numpy as np
import copy
M=MercatoreMap()
shape=self.shape
halfrow=(self.R['n']-1)/2
newrow=(self.R['n']-1)/2+1
newcol=self.C['n']*2
for k in self.M.keys() :
M.M[k]=[]
if reversed :
for drow in np.arange(newrow-1,-1,-1) :
M.M[k].append(np.concatenate((self.M[k][halfrow+drow],self.M[k][halfrow-drow])))
else :
for drow in np.arange(0,newrow) :
M.M[k].append(np.concatenate((self.M[k][halfrow+drow],self.M[k][halfrow-drow])))
M.M[k]=np.array(M.M[k])
M.shape=[M.M['phi'].shape[0],M.M['phi'].shape[1]]
M.M['long']=copy.deepcopy(M.M['phi'])
M.M['colat']=copy.deepcopy(M.M['theta'])
# long is phi where theta > 0 and phi+180 where theta < 0
M.M['long']+=180*(M.M['colat']<0)
# but for theta=0 the algorithm fails, so this is a patch
idx2=np.where(np.sign(M.M['theta']).ptp(axis=1)==2)[0][0]
for k in np.where(np.sign(M.M['theta']).ptp(axis=1)==0)[0] :
M.M['long'][k]=M.M['long'][idx2]
# colat is abs(theta)
M.M['colat']=abs(M.M['colat'])
M.C=copy.deepcopy(self.C)
        M.C['name']='long'
M.C['v']=M.M['long'][0]*1
M.C['n']=len(M.C['v'])
M.C['min']=M.C['v'].min()
M.C['max']=M.C['v'].max()
M.R=copy.deepcopy(self.R)
M.R['name']='colat'
M.R['v']=M.M['colat'][:,0]*1
M.R['n']=len(M.R['v'])
M.R['min']=M.R['v'].min()
M.R['max']=M.R['v'].max()
M.info['projection']='GRASP-CUT,Mercatore'
return M
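# Minimal usage sketch (illustration only; 'cutfile' stands for any GRASP .cut file in
# the format parsed by GraspMap, and healpy plus the grid2d helpers are assumed to be
# available as in the rest of this module):
def _example_cut_to_healpix(cutfile) :
    gm=GraspMap(cutfile,0)
    if gm.initFailed() : return None
    hmap=gm.healpix(256,mapname='power')   # bilinear resampling onto HEALPix ring pixels
    merc=gm.mercatore()                    # the same cuts regridded on (long, colat)
    return hmap,merc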
class MercatoreMap(MapGrid) :
"""A Mercator map: a map with x=column=longitude, y=row=colatitude"""
def __init__(self,*args) :
MapGrid.__init__(self)
self.info['projection']='GRASP-Mercatore'
def right_close_col(self,period=False,right_col_value=None) :
MapGrid.right_close_col(self,period=period,right_col_value=None)
if right_col_value != None :
try :
self.C['v'][-1]=float(right_col_value)
except :
pass
self.C['max'] = self.C['v'].max()
try :
self.M['long'][:,-1]=float(right_col_value)
except :
pass
#def resample(self,Long,Colat) :
#new=self.copy()
def unitPixelArea(self) :
import numpy as np
return np.deg2rad(self.R['delta'])*np.deg2rad(self.C['delta'])
def radialIntegral(self,arg,returnJustIntegral=False,thetaRange=None,asStruct=False) :
import numpy as np
sinColat=np.sin(np.deg2rad(self.M['colat']))
if type(arg) == type('') :
try :
field=self[arg]*sinColat*self.unitPixelArea()
except :
return None
else :
try :
field=arg*sinColat*self.unitPixelArea()
except :
return None
dIdtheta=np.zeros(self.R['n'])
for itheta in range(len(dIdtheta)) : dIdtheta[itheta]=0.5*(field[itheta][1:]+field[itheta][0:-1]).sum()
midH=0.5*(dIdtheta[1:]+dIdtheta[0:-1])
if returnJustIntegral and thetaRange==None: return np.sort(midH).sum()
Itheta=np.zeros(self.R['n'])
Itheta[0]=dIdtheta[0]*1
for itheta in range(1,len(Itheta)) : Itheta[itheta]=np.sort(midH[0:itheta+1]).sum()
if asStruct : return {'colat':self.M['colat'].mean(axis=1),'dIdcolat':dIdtheta,'Icolat':Itheta,'method':'spherical,trapezoidal'}
return self.M['colat'].mean(axis=1),dIdtheta,Itheta,'spherical,trapezoidal'
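# Note (illustration only): radialIntegral() above approximates
#     I(colat) = int_0^colat dtheta sin(theta) int_0^2pi dphi f(theta,phi)
# with the trapezoidal rule on the (colat, long) grid, so for f == 1 on a full-sky
# Mercatore grid the cumulative integral should approach 4*pi, e.g.
#     colat, dIdcolat, Icolat, method = merc.radialIntegral(np.ones(merc.shape))
#     Icolat[-1]   # ~ 4*pi
# where merc is a MercatoreMap such as the one returned by GraspMap.mercatore().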
class DirectionalMapMoments_Base_Old:
def __init__(self,method,maxorder) :
"""computs the directional moments on a map using healpix integration"""
import numpy as np
self.method=method
self.TreatNaNasZero=True
self.TreatInfAsZero=True
self.TreatOutBoundsAsZero=True
if Bounds == None :
self.Bounds =np.array([-np.inf,np.inf])
else :
self.Bounds =Bounds
if type(Map) == type('') :
self.load(Map)
return
self.exclusionRadius = 180.
self.mu=None
self.nside=None
self.npix=None
self.pixelArea = None
self.n=None
self.maxorder=maxorder
self.Sum=np.zeros([self.maxorder,self.maxorder,self.maxorder])
        # self.Sum[xp,yp,zp] holds sum(m * x**xp * y**yp * z**zp); the subclasses fill it from the map samples
    def __getitem__(self,key) :
        xp,yp,zp=key
        return self.Sum[xp,yp,zp]
    def calc_integral(self) :
        Integral = self.Sum*self.pixelArea
        norm=Integral[0,0,0]
        Sx=Integral[1,0,0]/norm
        Sy=Integral[0,1,0]/norm
        Sz=Integral[0,0,1]/norm
        line=',%e,%e,%e,%e'%(Sx,Sy,Sz,norm)
        return Sx,Sy,Sz,norm,line
def save(self,pickle_file) :
import pickle
if type(pickle_file)==type('') :
self.filename=pickle_file
try :
pickle.dump(self.__dict__,open(pickle_file,'w'))
except :
return False
else :
try :
pickle.dump(self.__dict__,pickle_file)
except :
return False
return True
def load(self,pickle_file) :
import pickle
if type(pickle_file)==type('') :
self.filename=pickle_file
try :
self.__dict__=pickle.load(open(pickle_file,'r'))
except :
return False
else :
try :
self.__dict__=pickle.load(pickle_file)
except :
return False
return True
class DirectionalMapMoments_GRD(DirectionalMapMoments_Base_Old) :
def __init__(self,GrdMap,exclusionRadius=None,reverse=False,excludeOut=False,NormalizedByBeam=False,asDict =True,Nside=None,ipixVec=None,maxorder=3,TreatNaNasZero=True,TreatInfAsZero=True,TreatOutBoundsAsZero=True,Bounds=None) :
"""computes the directional moments on a GRD map """
import numpy as np
        DirectionalMapMoments_Base_Old.__init__(self,'grd',maxorder)
class DirectionalMapMoments_CUT_Mercatore(DirectionalMapMoments_Base_Old) :
def __init__(self,GrdMap,exclusionRadius=None,reverse=False,excludeOut=False,NormalizedByBeam=False,asDict =True,Nside=None,ipixVec=None,maxorder=3,TreatNaNasZero=True,TreatInfAsZero=True,TreatOutBoundsAsZero=True,Bounds=None) :
"""computes the directional moments on a CUT map managed as a Mercatore map"""
import numpy as np
        DirectionalMapMoments_Base_Old.__init__(self,'cut-mercatore',maxorder)
class DirectionalMapMoments_Healpix(DirectionalMapMoments_Base_Old) :
def __init__(self,Map,exclusionRadius=None,reverse=False,excludeOut=False,NormalizedByBeam=False,asDict =True,Nside=None,ipixVec=None,maxorder=3,TreatNaNasZero=True,TreatInfAsZero=True,TreatOutBoundsAsZero=True,Bounds=None) :
"""computes the directional moments on a map using healpix integration"""
import numpy as np
import healpy as H
        DirectionalMapMoments_Base_Old.__init__(self,'healpix',maxorder)
self.TreatNaNasZero=TreatNaNasZero
        self.TreatInfAsZero=TreatInfAsZero
self.TreatOutBoundsAsZero=TreatOutBoundsAsZero
if Bounds == None :
self.Bounds =np.array([-np.inf,np.inf])
else :
self.Bounds =Bounds
if type(Map) == type('') :
self.load(Map)
return
if exclusionRadius==None :
self.exclusionRadius = 180. if excludeOut else 0.
else :
self.exclusionRadius = exclusionRadius
self.mu=np.cos(self.exclusionRadius/180.*np.pi)
self.nside=int(np.sqrt(len(Map)/12.)) if Nside == None else int(Nside)
self.npix=int(12.*self.nside**2)
self.pixelArea = 4.*np.pi/float(12.*self.nside**2)
if ipixVec == None :
v=np.array(H.pix2vec(self.nside,nside2ipix(self.nside)))
idxAllowed=np.where(v[2]<=self.mu)
else :
v=np.array(H.pix2vec(self.nside,ipixVec))
idxAllowed=ipixVec
m=Map[idxAllowed]
x=v[0][idxAllowed]
y=v[1][idxAllowed]
z=v[2][idxAllowed]
if self.TreatOutBoundsAsZero :
idx = np.where((m<self.Bounds[0])*(self.Bounds[1]<m))[0]
if len(idx) > 0 : m[idx]=0.
if self.TreatNaNasZero :
idx = np.where(np.isnan(m))[0]
if len(idx) > 0 : m[idx]=0.
if self.TreatInfAsZero :
idx = np.where(1-np.isfinite(m))[0]
if len(idx) > 0 : m[idx]=0.
self.n=len(m)
self.maxorder=maxorder
self.Sum=np.zeros([self.maxorder,self.maxorder,self.maxorder])
for xp in range(maxorder) :
for yp in range(maxorder) :
for zp in range(maxorder) :
self.Sum[xp,yp,zp]=(m*x**xp*y**yp*z**zp).sum()
    def calc_integral(self) :
        Integral = self.Sum*self.pixelArea
        norm=Integral[0,0,0]
        Sx=Integral[1,0,0]/norm
        Sy=Integral[0,1,0]/norm
        Sz=Integral[0,0,1]/norm
        line=',%e,%e,%e,%e'%(Sx,Sy,Sz,norm)
        return Sx,Sy,Sz,norm,line
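# Minimal usage sketch (illustration only; beam_map is a hypothetical 1-D HEALPix
# array): the moment machinery above reduces a beam map to its power-weighted
# direction on the sphere.
#     mom = DirectionalMapMoments_Healpix(beam_map)
#     Sx, Sy, Sz, norm, line = mom.calc_integral()
#     mom[1, 0, 0]   # raw sum of map * x
# norm is the solid-angle weighted beam integral and (Sx, Sy, Sz) are the first
# moments normalised by it, i.e. the beam "centre of gravity".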
#from numpy import nan
#def cuts2matrix(self,No180Pad=No180Pad)
#;
#; converts a structure from get_cuts in a set of matrices, the output is a structure
#; theta = theta values converted from GRASP convention to usual polar convention
#; phi = phi values from GRASP convention to usual polar convention
#; c1r, c1i, c2r, c2i = components 1 and 2 real (r) and imaginary (i) parts
#; GRASP = a structure containing the original grasp theta and phi
#;
#; NOTE = usually GRASP does not generates cuts for PHI=180deg since it is
#; nothing else than PHI=0deg, by default CUTS2MATRIX add a PHI=180deg
#; cut. To exclude this set the /No180Pad keyword
#;
#;
#if not keyword_set(No180Pad) then Pad180 = 1 else Pad180 = 0
#nphi=n_tags(cuts)
#m1=dblarr(nphi+Pad180,cuts.(0).ntheta)
#theta=m1
#phi=m1
#long=m1
#colat=m1
#c1r=m1
#c1i=m1
#c2r=m1
#c2i=m1
#vtheta=cuts.(0).theta
#vphi=dblarr(nphi+Pad180)
#vlong=dblarr(nphi+Pad180)
#for iphi=0,nphi-1 do begin
#theta[iphi,*]=cuts.(iphi).theta
#phi[iphi,*]=cuts.(iphi).phi
#c1r[iphi,*]=cuts.(iphi).c1r
#c1i[iphi,*]=cuts.(iphi).c1i
#c2r[iphi,*]=cuts.(iphi).c2r
#c2i[iphi,*]=cuts.(iphi).c2i
#vphi[iphi]=cuts.(iphi).phi
#endfor
#if Pad180 ne 0 then begin
#; performs padding of 180 deg
#theta[nphi,*]=-reverse(cuts.(0).theta)
#phi[nphi,*]=cuts.(0).phi+180.
#c1r[nphi,*]=reverse(cuts.(0).c1r)
#c1i[nphi,*]=reverse(cuts.(0).c1i)
#c2r[nphi,*]=reverse(cuts.(0).c2r)
#c2i[nphi,*]=reverse(cuts.(0).c2i)
#vphi[nphi]=180.
#endif
#return,create_struct( $
#'theta',theta $
#,'phi',phi $
#,'c1r',c1r $
#,'c1i',c1i $
#,'c2r',c2r $
#,'c2i',c2i $
#,'GRASP',create_struct('theta',vtheta,'phi',vphi) $
#,'nphi',nphi $
#,'Pad180',Pad180 $
#,'phi0',vphi[0] $
#,'Delta_phi',vphi[1]-vphi[0] $
#,'theta0',vtheta[0] $
#,'Delta_theta',vtheta[1]-vtheta[0] $
#)
#end
#function cuts_grid,cuts
#; derives gridding parameters for a structure produced by get_cuts
#phi0=cuts.(0).phi
#theta0=cuts.(0).theta[0]
#dphi = cuts.(1).phi-cuts.(0).phi
#dtheta = cuts.(0).theta[1]-cuts.(0).theta[0]
#xmax = -1e6
#for kk = 0,n_tags(cuts)-1 do begin
#_xmax=max(abs(cuts.(0).theta))
#if _xmax gt xmax then xmax = _xmax
#endfor
#return,create_struct('phi0',phi0,'theta0',theta0,'dphi',dphi,'dtheta',dtheta,'thetamax',xmax)
#end
#function componentsTOstoke,component1_real,component1_imaginary,component2_real,component2_imaginary,direct=direct
#;
#; converts component maps to stokes
#;
#E1 = component1_real^2+component1_imaginary^2
#E2 = component2_real^2+component2_imaginary^2
#SI = E1+E2
#SQ = E1-E2
#E1 = sqrt(E1)
#E2 = sqrt(E2)
#F1 = ATAN(component1_imaginary,component1_real)
#F2 = ATAN(component2_imaginary,component2_real)
#SU = 2*E1*E2*COS(F2 - F1)
#SV = 2*E1*E2*SIN(F2 - F1)
#return,create_struct('I',SI,'Q',SQ,'U',SU,'V',SV,'F1',F1,'F2',F2)
#end
#function cuts2cartesian,cuts,side=side,stokes=stokes
#; converts a cut in a matrix using cartesian polar coordinates
#if not keyword_set(side) then side = 600
#phi0=cuts.(0).phi
#theta0=cuts.(0).theta[0]
#dphi = cuts.(1).phi-cuts.(0).phi
#dtheta = cuts.(0).theta[1]-cuts.(0).theta[0]
#npix=side+1
#xmin=-max(abs(cuts.(0).theta))
#xmax = -1e6
#for kk = 0,n_tags(cuts)-1 do begin
#_xmax=max(abs(cuts.(0).theta))
#if _xmax gt xmax then xmax = _xmax
#endfor
#ix0=(npix-1)/2
#iy0=(npix-1)/2
#xmap=dblarr(npix,npix)
#ymap=dblarr(npix,npix)
#for r = 0,npix-1 do xmap[r,*]=(double(indgen(npix))/double(npix-1)-0.5)*xmax*2
#for c = 0,npix-1 do ymap[*,c]=(double(indgen(npix))/double(npix-1)-0.5)*xmax*2
#colatmap=sqrt(xmap^2+ymap^2)
#longmap=atan(ymap,xmap)/!dpi*180.
#idx = where(longmap lt 0,count)
#if count gt 0 then longmap[idx]=360+longmap[idx]
#pt=longcolat2phitheta(longmap,colatmap)
#rc=longcolat2rowcol(longmap,colatmap,dphi=dphi,dtheta=dtheta,phi0=phi0,theta0=theta0)
#slm=cuts2matrix(cuts)
#c1r=map_interpolate(rc.iphi,rc.itheta,slm.c1r)
#c1i=map_interpolate(rc.iphi,rc.itheta,slm.c1i)
#c2r=map_interpolate(rc.iphi,rc.itheta,slm.c2r)
#c2i=map_interpolate(rc.iphi,rc.itheta,slm.c2i)
#out=create_struct('x',xmap,'y',ymap,'ix0',ix0,'iy0',iy0,'colat',colatmap,'long',longmap,'phi',pt.phi,'theta',pt.theta,'iphi',rc.iphi,'itheta',rc.itheta)
#if keyword_set(stokes) then begin
#return,create_struct(out,'stokes',componentsTOstoke(c1r,c1i,c2r,c2i))
#endif
#return,create_struct(out,'power',c1r^2+c1i^2+c2r^2+c2i^2)
#end
#function cuts2healpix,nside,cuts,reversed=reversed,ipix=ipix,onlyPower=onlyPower,dbi=dbi,stokes=stokes
#; convert a cuts into an healpix max by bilinear interpolation
#; if /onlyPower returns just the power otherwise returns
#; c1r = component 1 real part
#; c1i = component 1 imaginary part
#; c2r = component 2 real part
#; c2i = component 2 imaginary part
#; power
#; if /dbi power is converted in 10*alog10(power) (if /stokes this is not done)
#; if /stokes return stokes parameters and F1 and F2 instead of components c1, c2
#;
#if not keyword_set(reversed) then ipix=nside2ipix(nside) else ipix=nside2ipix(nside,/reversed)
#slm = cuts2matrix(cuts)
#rc=ipix2rowcol(nside,ipix,slm.phi0,slm.delta_phi,slm.theta0,slm.delta_theta)
#r1=map_interpolate(rc.iphi,rc.itheta,slm.c1r)
#i1=map_interpolate(rc.iphi,rc.itheta,slm.c1i)
#r2=map_interpolate(rc.iphi,rc.itheta,slm.c2r)
#I2=map_interpolate(rc.iphi,rc.itheta,slm.c2i)
#if keyword_set(stokes) then return,componentsTOstoke(r1,i1,r2,i2)
#power = r1^2+i1^2+r2^2+i2^2
#if keyword_set(dbi) then power=10d0*alog10(power)
#if keyword_set(onlyPower) then return,power
#return,create_struct('c1r',r1,'c2r',r2,'c1i',i1,'c2i',i2,'power',power)
#end
#function beamSumMap,map,exclusionRadius=exclusionRadius,reverse=reverse,v=v,excludeOut=excludeOut,notNormalizedByBeam=notNormalizedByBeam,asStruct=asStruct
#; computs the directional moments on a map
#if not keyword_set(excludeOut) then excludeOut = 0
#if not keyword_set(exclusionRadius) then $
#if excludeOut then exclusionRadius=[180d0] else exclusionRadius=[0d0]
#npix=n_elements(map)
#nside=long(sqrt(npix/12.))
#pix2vec_ring,nside,indgen(npix,/long),v
#z = v[*,2]
#for i = 0,2 do v[*,i]=v[*,i]*map
#sss = dblarr(10,n_elements(exclusionRadius))
#sss[9,*]=4d0*!dpi/double(npix)
#sss[5,*]=npix
#sss[6,*]=nside
#for ir=0,n_elements(exclusionRadius)-1 do begin
#xr=exclusionRadius[ir]
#sss[7,ir]=xr
#mu=cos(xr/180d0*!dpi)
#imin = 0
#imax = npix-1l
#count=-1
#if excludeOut then $
#if xr eq 180. then imax=npix-1 else imax = min(where(mu ge z,count)) $
#else $
#if xr eq 0. then imin=0 else imin = min(where(mu ge z,count))
#print,ir,xr,excludeOut,ir,count,imin,imax
#sss[8,ir]=count
#sss[4,ir]=total(map[imin:imax])
#for ic = 0,2 do sss[ic,ir] = total(v[imin:imax,ic])
#if not keyword_set(notNormalizedByBeam) then for ic = 0,2 do sss[ic,ir] = sss[ic,ir]/sss[4,ir]
#sss[3,ir]=sqrt(sss[0,ir]^2+sss[1,ir]^2+sss[2,ir]^2)
#for ic = 0,2 do sss[ic,ir]=sss[ic,ir]/sss[3,ir]
#endfor
#if not keyword_set(asStruct) then return,sss
#; computes the polar deflection
#polar_deflection=dblarr(n_elements(exclusionRadius))
#longitude_deflection=dblarr(n_elements(exclusionRadius))
#for ir=0,n_elements(exclusionRadius)-1 do begin
#polar_deflection=acos(sss[2,ir])*180d0/!dpi
#normxy = sqrt(total(sss[0:1,ir]^2))
#longitude_deflection=atan(sss[1,ir]/normxy,sss[0,ir]/normxy)*180d0/!dpi
#endfor
#return,create_struct($
#'vSx',sss[0,*] $
#,'vSy',sss[1,*] $
#,'vSz',sss[2,*] $
#,'S',sss[3,*] $
#,'beam_sum',sss[4,*] $
#,'npix',long(sss[5,*]) $
#,'nside',long(sss[6,*]) $
#,'exclusionRadius',sss[7,*] $
#,'excludedPixels',long(sss[8,*]) $
#,'pixelArea',sss[9,*] $
#,'deflection_polar_deg',polar_deflection $
#,'deflection_longitude_deg',longitude_deflection $
#)
#end
#function beamSums,nside,cuts,exclude_angle=exclude_angle,map=map,asStruct=asStruct
#; computes Sx, Sy. Sz (directional integrals) for a beam map
#map=cuts2healpix(nside,cuts,ipix=ipix,/onlyPower)
#pix2vec_ring,nside,ipix,v
#if keyword_set(exclude_angle) then begin
#print,exclude_angle
#ang = 180./dpi*acos(v[*,2])
#idx = where(ang > exclude_angle,count)
#if count gt 0 then begin
#v1=dblarr(n_elements(idx),3)
#for i=0,2 do v1[*,i]=v[idx,i]
#v=v1
#endif else begin
#print,"Error all pixels removed"
#return,0
#endelse
#endif
#sss = dblarr(7)
#sss[6]=nside
#sss[5]=12d0*double(nside)^2
#sss[4]=total(map)
#; returns the versors
#for i=0,2 do sss[i] = (total(v[*,i]*map))/sss[4]
#; normalize
#sss[3]=sqrt(total(sss[0:2]^2))
#for i=0,2 do sss[i] = sss[i]/sss[3]
#if not keyword_set(asStruct) then return,sss
#; computes the polar deflection
#polar_deflection_deg=acos(sss[2]/sss[3])*180d0/!dpi
#normxy = sqrt(total(sss[0:1]^2))
#longitude_deflection_deg=atan(sss[1]/normxy,sss[0]/normxy)*180d0/!dpi
#return,create_struct( $
#'vSx',sss[0] $
#,'vSy',sss[1] $
#,'vSz',sss[2] $
#,'S',sss[3] $
#,'beam_sum',sss[4] $
#,'npix',long(sss[5]) $
#,'nside',long(sss[6]) $
#,'deflection_polar_deg',polar_deflection_deg $
#,'deflection_longitude_deg',longitude_deflection_deg $
#)
#end
#function beamSumS2,lcuts,hcuts,nside=nside,map=map,returnFirst=returnFirst,returnSecond=returnSecond
#; computes Sx, Sy. Sz for a beam using two maps,
#; lcuts = a lowress map of cuts
#; hcuts = an highres map of cuts
#; /returnFirst returns just the high resolution map (no summation)
#; /returnSecond returns just the second map (no summation)
#;
#; high resolution integral
#hpa=cuts_grid(hcuts)
#if not keyword_set(nside) then nside = 1024l
#map=dblarr(12l*nside*nside)
#radius = -1e6
#for kk = 0,n_tags(hcuts)-1 do begin
#_xmax=max(abs(hcuts.(kk).theta))
#if _xmax gt radius then radius = _xmax
#endfor
#query_disc,nside,[0.,0.,1.],radius,ipix,/deg,/inclusive
#slm = cuts2matrix(hcuts)
#rc=ipix2rowcol(nside,ipix,hpa.phi0,hpa.dphi,hpa.theta0,hpa.dtheta)
#;dphi=hpa.dphi,dtheta=hpa.dtheta,phi0=hpa.phi0,theta0=hpa.theta0)
#r1=map_interpolate(rc.iphi,rc.itheta,slm.c1r)
#i1=map_interpolate(rc.iphi,rc.itheta,slm.c1i)
#r2=map_interpolate(rc.iphi,rc.itheta,slm.c2r)
#I2=map_interpolate(rc.iphi,rc.itheta,slm.c2i)
#map[ipix] = r1^2+r2^2+i1^2+i2^2
#if keyword_set(returnFirst) then return,map
#query_disc,nside,[0.,0.,-1.],180.-radius,ipix,/deg
#;ipix=nside2ipix(nside)
#slm = cuts2matrix(lcuts)
#lpa=cuts_grid(lcuts)
#rc=ipix2rowcol(nside,ipix,lpa.phi0,lpa.dphi,lpa.theta0,lpa.dtheta)
#r1=map_interpolate(rc.iphi,rc.itheta,slm.c1r)
#i1=map_interpolate(rc.iphi,rc.itheta,slm.c1i)
#r2=map_interpolate(rc.iphi,rc.itheta,slm.c2r)
#I2=map_interpolate(rc.iphi,rc.itheta,slm.c2i)
#map[ipix] = r1^2+r2^2+i1^2+i2^2
#if keyword_set(returnSecond) then return,map
#return,beamSumMap(map,/reverse,/asStruct)
#end
#function radialDependence,mapname,listRadiiDeg=listRadiiDeg
#if not keyword_set(listRadiiDeg) then listRadiiDeg=[0.1,0.5,1.,1.5,2.,2.5,5.,7.5,10.,20.,30.,40.,50,60,70,80.,85.,90,100,110,120,130,140,150,160,170,180]
#read_fits_map,mapname,mapX
#xxx = beamSumMap(mapX,exclusionRadius=listRadiiDeg,/excludeOut)
#sss=xxx
#for ic=0,2 do sss[ic,*]=sss[ic,*]/sss[4,*]
#sss[3,*]=sqrt(sss[0,*]^2+sss[1,*]^2+sss[2,*]^2)
#for ic=0,2 do sss[ic,*]=sss[ic,*]/sss[3,*]
#radius=sss[7,*]
#dump=sss[3,*]
#polar_deflection=acos(sss[2,*])/!dpi*180.*60.
#longitudinal_deflection=atan(sss[1,*],sss[0,*])/!dpi*180.
#return,create_struct('name',mapname,'long_def',longitudinal_deflection,'pol_def',polar_deflection,'dump',dump,'radius',radius)
#end
#function readgrd, fileinput
#; reads a grd file
#; (deprecated)
#xs = 0.d
#xs = 0.d
#ye = 0.d
#ye = 0.d
#str='!5 '
#ktype = 1l ; --> data type format
#nset = 1l ; --> number of beams in the file
#icomp = 1l ; --> field component
#ncomp = 1l ; --> number of components
#igrid = 1l ; --> type of field grid
#ix = 1l ; --> center of the beam
#iy = 1l ; (ix,iy)
#c1 = 0.d
#c2 = 0.d
#c3 = 0.d
#c4 = 0.d
#nx = 1l
#ny = 1l
#klimit = 1l
#openr,1,fileinput
#for line=0,100 do begin
#if (strtrim(str,2) ne '++++') then begin
#readf,1,str
#print,str
#endif else begin
#goto, jump1
#endelse
#endfor
#jump1: readf,1,ktype
#readf,1,nset,icomp,ncomp,igrid
#readf,1,ix,iy
#readf,1,xs,ys,xe,ye
#readf,1,nx,ny,klimit
#dx = (xe - xs)/(nx-1)
#x = findgen(nx)*dx + xs
#dy = (ye - ys)/(ny-1)
#y = findgen(ny)*dy + ys
#print,'Reading ', fileinput
#print,'grid of ', nx,' x ', ny,' points'
#c1r = dblarr(nx,ny)
#c1i = dblarr(nx,ny)
#c2r = dblarr(nx,ny)
#c2i = dblarr(nx,ny)
#for i=0,nx-1 do begin
#for j=0,ny-1 do begin
#readf,1,c1,c2,c3,c4
#c1r(j,i) = c1
#c1i(j,i) = c2
#c2r(j,i) = c3
#c2i(j,i) = c4
#endfor
#endfor
#close,1
#power = c1r^2 + c1i^2 + c2r^2 + c2i^2
#res = { x : x , $
#y : y , $
#power : power $
#}
#return, res
#end
def readgrd(inputfile) :
"""
; reads a grd file
; (deprecated)
Reference document:
LFI BEAMS DELIVERY: FORMAT SPECIFICATIONS
M. Sandri
PL-LFI-PST-TN-044, 1.0,July 2003
"""
#xs = 0.d
#xs = 0.d
#ye = 0.d
#ye = 0.d
#str='!5 '
#ktype = 1l ; --> data type format
#nset = 1l ; --> number of beams in the file
#icomp = 1l ; --> field component
#ncomp = 1l ; --> number of components
#igrid = 1l ; --> type of field grid
#ix = 1l ; --> center of the beam
#iy = 1l ; (ix,iy)
#c1 = 0.d
#c2 = 0.d
#c3 = 0.d
#c4 = 0.d
#nx = 1l
#ny = 1l
#klimit = 1l
    import numpy as np
    try :
h=open(inputfile,'r').readlines()
except :
print "File %s not found"%inputfile
return
# removes the new line
for i in range(len(h)) :
h[i] = h[i].split('\n')[0].split('\r')[0]
currentline=0
while(h[currentline] != '++++') :
currentline+=1
if currentline == len(h):
print "Error marker ++++ not found"
return h
infos=h[0:currentline]
currentline +=1
print h[currentline]
ktype = int(h[currentline])
currentline +=1
print h[currentline]
ll = h[currentline].split()
nset = int(ll[0])
icomp = int(ll[1])
ncomp = int(ll[2])
igrid = int(ll[3])
currentline +=1
print h[currentline]
ll = h[currentline].split()
ix = int(ll[0])
iy = int(ll[1])
currentline +=1
print h[currentline]
ll = h[currentline].split()
xs = float(ll[0])
ys = float(ll[1])
xe = float(ll[2])
ye = float(ll[3])
currentline +=1
print h[currentline]
ll = h[currentline].split()
nx = int(ll[0])
ny = int(ll[1])
klimit = int(ll[2])
dx = (xe - xs)/float(nx-1)
xcen=ix*dx
x = np.arange(nx)*dx + xs+xcen
dy = (ye - ys)/float(ny-1)
ycen=iy*dy
y = np.arange(ny)*dy + ys+ycen
print 'Reading ', inputfile
print 'grid of ', nx,' x ', ny,' points'
print 'ix ', ix,' iy ', iy
c1r = np.zeros([ny,nx])
c1i = np.zeros([ny,nx])
c2r = np.zeros([ny,nx])
c2i = np.zeros([ny,nx])
for j in range(ny) :
for i in range(nx) :
currentline +=1
ll = h[currentline].split()
c1r[j,i] = float(ll[0])
c1i[j,i] = float(ll[1])
c2r[j,i] = float(ll[2])
c2i[j,i] = float(ll[3])
    return {'x':x,'y':y,'r1':c1r,'r2':c2r,'i1':c1i,'i2':c2i,'power':c1r**2 + c1i**2 + c2r**2 + c2i**2,'infos':infos}
class GridMap(MapGrid) :
def __init__(self,inputfile,skiplines=0,silent=False,closeColumn=False,Pickle=False,nodata=False,addPolar=True,addUV=True,addP1P2=True) :
        import numpy as np
        MapGrid.__init__(self)
if Pickle :
self.load(inputfile)
return
self.info['grd_file']=inputfile.strip()
self.info['projection']='GRASP-GRD'
self.info['ReferenceDocument']="LFI BEAMS DELIVERY: FORMAT SPECIFICATIONS\nM. Sandri\nPL-LFI-PST-TN-044, 1.0,July 2003"
if inputfile.strip() == '' : return
self.get_grd(inputfile,nodata=nodata)#,CounterPhi=CounterPhi,silent=silent,useCounterPhi=useCounterPhi)
if closeColumn :
self.right_close_col()
for k in self.M.keys() :
if k!='_row_values' and k!='_col_values' and k!='_row_index' and k!='_col_index' :
for r in range(self.R['n']) :
self.M[k][r][-1]=self.M[k][self.R['n']-1-r][0]
if addPolar :
self.M['colat']=np.rad2deg(np.arcsin((self.M['_col_values']**2+self.M['_row_values']**2)**0.5))
self.M['long']=np.mod(np.rad2deg(np.arctan2(self.M['_row_values'],self.M['_col_values'])),360.)
if addUV :
self.newmap(self.R['name'],unit='',value=self['_row_values'])
self.newmap(self.C['name'],unit='',value=self['_col_values'])
if addP1P2 :
self.newmap('p1',unit='',value=self['r1']**2+self['i1']**2)
self.newmap('p2',unit='',value=self['r2']**2+self['i2']**2)
def end_of_header_marker(self) :
"""returns the string used as a marker of end of header"""
return '++++'
def formatGrasp(self) : return {'float':' %17.10e','int':' %11d'}
def get_grd(self,inputfile,nodata=False) :
"""
reads a grd file
Reference document:
LFI BEAMS DELIVERY: FORMAT SPECIFICATIONS
M. Sandri
PL-LFI-PST-TN-044, 1.0,July 2003
        Beware: the fastest moving index is the column; the IDL readgrd
        instead swaps rows and columns, i.e. it reads the columns as if
        they were the slowest moving index.
"""
import sys
import numpy as np
import copy
try :
h=open(inputfile,'r').readlines()
self.mapname=inputfile
self.info['inputfile']=inputfile
except :
print "File %s not found"%inputfile
return
# removes the new line and other special characters
for i in range(len(h)) :
h[i] = h[i].split('\n')[0].split('\r')[0].strip()
currentline=0
while(h[currentline].strip() != self.end_of_header_marker()) :
currentline+=1
if currentline == len(h):
print "Error marker %s not found" % self.end_of_header_marker()
return h
self.info['header']=copy.deepcopy(h[0:currentline])
currentline +=1
self.info['ktype']=int(h[currentline])
currentline +=1
print h[currentline].strip()
ll = h[currentline].split()
self.info['nset']=int(ll[0])
self.info['icomp']= int(ll[1])
self.info['ncomp']= int(ll[2])
self.info['igrid'] = int(ll[3])
currentline +=1
print h[currentline].strip()
ll = h[currentline].split()
self.info['ix'] = int(ll[0])
self.info['iy'] = int(ll[1])
currentline +=1
print h[currentline].strip()
ll = h[currentline].split()
self.info['xs'] = float(ll[0])
self.info['ys'] = float(ll[1])
self.info['xe'] = float(ll[2])
self.info['ye'] = float(ll[3])
currentline +=1
print h[currentline].strip()
ll = h[currentline].split()
self.info['nx'] = int(ll[0])
self.info['ny'] = int(ll[1])
self.info['klimit'] = int(ll[2])
#computed parameters
# X are columns
self.info['dx'] = (self.info['xe']-self.info['xs'])/float(self.info['nx']-1)
self.info['xcen'] = self.info['dx']*self.info['ix']
self.set_col_scale('U','uv',np.arange(self.info['nx'])*self.info['dx']+ self.info['xs']+self.info['xcen'])
# Y are rows
self.info['dy'] = (self.info['ye']-self.info['ys'])/float(self.info['ny']-1)
self.info['ycen'] = self.info['dy']*self.info['iy']
self.info['grd_file']=inputfile.strip()
self.set_row_scale('V','uv',np.arange(self.info['ny'])*self.info['dy']+ self.info['ys']+self.info['ycen'])
print 'Reading ', inputfile
print 'grid of ', self.info['nx'],' x ', self.info['ny'],' points'
if nodata :
return
# maps used for debug
self.newmap('_line_index','')
# compoenent maps
self.newmap('r1','')
self.newmap('i1','')
self.newmap('r2','')
self.newmap('i2','')
self.newmap('power','')
#self.newmap('rho1','')
#self.newmap('rho2','')
#self.newmap('phi1','')
#self.newmap('phi2','')
for r in range(self.R['n']) :
for c in range(self.C['n']) :
currentline +=1
self.M['_row_values'][r,c]=self.R['v'][r]*1.
self.M['_row_index'][r,c]=r*1.
self.M['_col_values'][r,c]=self.C['v'][c]*1.
self.M['_col_index'][r,c]=c*1.
self.M['_line_index'][r,c]=currentline*1.
ll = h[currentline].split()
self.M['r1'][r,c]=float(ll[0])
self.M['i1'][r,c]=float(ll[1])
self.M['r2'][r,c]=float(ll[2])
self.M['i2'][r,c]=float(ll[3])
self.M['power'][r,c]=float(ll[0])**2+float(ll[1])**2+float(ll[2])**2+float(ll[3])**2
#self.M['rho1'][r,c]=(float(ll[0])**2+float(ll[1])**2)**0.5
#self.M['rho2'][r,c]=(float(ll[2])**2+float(ll[3])**2)**0.5
#self.M['phi1'][r,c]=np.arctan2(float(ll[1]),float(ll[0]))
#self.M['phi2'][r,c]=np.arctan2(float(ll[3]),float(ll[2]))
def UV(self,Vectors=False) :
"""returns the U, V matrix
if Vectors=True the values of R and C are returned
"""
if Vectors :
return self.C['v'],self.R['v']
return self.M['_col_values'],self.M['_row_values']
def thetaUVphiUV(self,deg=True) :
"""returns the thetaUV, phiUV matrix
"""
return UV2thetaUVphiUV(self.M['_col_values'],self.M['_row_values'],deg=deg)
def cart3d(self) :
"""returns the x,y,z matrices
"""
import numpy as np
theta,phi=UV2thetaUVphiUV(self.M['_col_values'],self.M['_row_values'],deg=False)
return np.cos(phi)*np.sin(theta),np.sin(phi)*np.sin(theta),np.cos(theta)
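    # Example (illustration only): the UV grid relates to the polar angles through
    #     U = sin(thetaUV)*cos(phiUV),  V = sin(thetaUV)*sin(phiUV)
    # so for a GridMap g one can move between representations with
    #     U, V = g.UV()
    #     thetaUV, phiUV = g.thetaUVphiUV(deg=True)
    #     x, y, z = g.cart3d()   # unit vectors of every grid node
    # which is the geometry needed for directional integrals on the sphere.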
def recompose_header(self,*arg,**karg) :
"keywords: inhdr header in input"
import copy
fmtF=self.formatGrasp()['float']
fmtI=self.formatGrasp()['int']
hdr=None
if karg.has_key('inhdr') :
hdr=copy.deepcopy(karg['inhdr'])
if hdr == None :
hdr=copy.deepcopy(self.info['header'])
if len(arg)>0 :
if type(arg[0]) == type('') :
hdr.append(arg[0])
else :
for k in arg[0] :
hdr.append(k)
hdr.append('In the lines after the header marker defined by 4 "+" characters')
hdr.append('line 1 : ktype')
hdr.append('line 2 : nset icomp ncomp igrid')
hdr.append('line 3 : ix iy')
hdr.append('line 4 : xs ys xe ye')
hdr.append('line 5 : nx ny klimit')
hdr.append(self.end_of_header_marker())
ll=''
        ll+=fmtI%int(self.info['ktype'])
hdr.append(ll.upper())
ll=''
ll+=fmtI%int(self.info['nset'])
ll+=fmtI%int(self.info['icomp'])
ll+=fmtI%int(self.info['ncomp'])
ll+=fmtI%int(self.info['igrid'])
hdr.append(ll.upper())
ll=''
ll+=fmtI%int(self.info['ix'])
ll+=fmtI%int(self.info['iy'])
hdr.append(ll.upper())
ll=''
ll+=fmtF%float(self.info['xs'])
ll+=fmtF%float(self.info['ys'])
ll+=fmtF%float(self.info['xe'])
ll+=fmtF%float(self.info['ye'])
hdr.append(ll.upper())
ll=''
ll+=fmtI%int(self.info['nx'])
ll+=fmtI%int(self.info['ny'])
ll+=fmtI%int(self.info['klimit'])
hdr.append(ll.upper())
return hdr
def recompose_map(self,*arg,**karg) :
import copy
fmtF=self.formatGrasp()['float']
fmtI=self.formatGrasp()['int']
if len(arg) > 0 :
lst=copy.deepcopy(arg[0])
else :
lst=[]
for r in range(self.R['n']) :
for c in range(self.C['n']) :
ll = fmtF%(self.M['r1'][r,c])
ll += fmtF%(self.M['i1'][r,c])
ll += fmtF%(self.M['r2'][r,c])
ll += fmtF%(self.M['i2'][r,c])
lst.append(ll.upper())
return lst
def FourColumnsPower(self,power1Name='p1',power2Name='p2',powerName='power') :
"a FourColumns map has r1=sqrt(p1), i1=0, r2=sqrt(p2), i2=0"
new=self.copy()
new.info['ktype']=1
        if self.M.has_key(power1Name) and self.M.has_key(power2Name) :
new.info['ncomp']=2
new.M['r1']=self[power1Name]**0.5
new.M['r2']=self[power2Name]**0.5
new.M['i1']=self[power1Name]*0
new.M['i2']=self[power1Name]*0
        elif self.M.has_key(powerName) :
            new.info['ncomp']=1
            new.M['r1']=self[powerName]**0.5
            new.M['r2']=self[powerName]*0
            new.M['i1']=self[powerName]*0
            new.M['i2']=self[powerName]*0
else :
print "the map shall contain ",power1Name,power2Name," or ",powerName
return
return new
def ipix2longcolat(self,nside,ipix,nest=False,deg=True) :
""" converts an healpix ipix (ring) into index of phi and of theta in the matrix"""
from healpy import pix2ang
import numpy as np
colat,_long=pix2ang(nside,ipix,nest=nest)
if deg : return _long*180./np.pi,colat*180./np.pi
return _long,colat
def nside2ipix(self,nside,Reversed=False) :
""" converts nside into a list of pixels (ring)
reversed = True means the orderring is reversed
"""
return nside2ipix(nside,Reversed=Reversed)
def healpix(self,nside,mapname='power',nest=False,Reversed=False,colatrange=None,returnAll=False,usePeriodicalInterpolator=True) :
"""converts to healpix or a stack of healpix maps of given nside
colatrange=None , takes all the map
colatrange=']a,b['
colatrange='[a,b['
colatrange=']a,b]'
"""
import numpy as np
import healpy as H
if colatrange==None :
ipix=self.nside2ipix(nside,Reversed=Reversed)
phiUV,thetaUV = self.ipix2longcolat(nside,ipix,deg=False)
else :
fact=180./np.pi
prs=(colatrange.strip()).split(',')
left = [prs[0][0],float(prs[0][1:])]
right = [prs[1][-1],float(prs[1][0:-1])]
NPEQ=12*nside/2
print left,right
ipixmin=H.ang2pix(nside,left[1]/fact,0)-NPEQ
ipixmax=H.ang2pix(nside,right[1]/fact,0)+NPEQ
if ipixmin < 0 : ipixmin=0
if ipixmax > 12*nside**2-1 : ipixmax=12*nside**2
ipix = np.arange(ipixmin,ipixmax)
colat,Long = H.pix2ang(nside,ipix)
fl=np.ones(len(colat))
            if left[0] == ']' :  # left[0] is the bracket character parsed above
fl*=(left[1]/fact)<colat
else :
fl*=(left[1]/fact)<=colat
            if right[0] == '[' :  # right[0] is the bracket character parsed above
fl*=colat<(right[1]/fact)
else :
fl*=colat<=(right[1]/fact)
idx=np.where(fl)[0]
ipix=ipix[idx]
thetaUV=colat[idx]
phiUV=Long[idx]
fl=None
idx=None
colat=None
Long=None
U,V = thetaUVphiUV2UV(thetaUV,phiUV,deg=False)
r1=self.bilinearXY(mapname,U,V)
if returnAll : return r1,ipix,U,V,thetaUV,phiUV
return r1,ipix
    def healpix_pixelArea(self,nside) :
        "solid angle of one healpix pixel in steradian: 4*pi/(12*nside**2)"
        import numpy as np
        return 4.*np.pi/(12.*float(nside)**2)
    def healpix_integral(self,nside,mapname,Reversed=False,colatrange=None) :
        import numpy as np
        h=self.healpix(nside,mapname=mapname,nest=False,Reversed=Reversed,colatrange=colatrange,returnAll=False)
        pixelArea=self.healpix_pixelArea(nside)
        return np.sort(h[0]).sum()*pixelArea
def maximumRadius(self) :
"returns the largest possible radius for an inscribed circle"
a=self.C['v'].ptp()/2.
b=self.R['v'].ptp()/2.
return a if a < b else b
def circularMask(self,*arg) :
"returns a mask for the largest possible circle inscribed in the map"
mask = np.array(self.radius()<=self.maximumRadius(),dtype='int')
if len(arg) == 0 : return mask
        try :
            for name in arg :
                self.M[name]*=mask
        except :
            print arg," not a valid name"
def unitPixelArea(self) :
import numpy as np
return self.R['delta']*self.C['delta']
def radialIntegral(self,arg,method='planar,simpson',returnJustIntegral=False,asStruct=False,nRadii=51) :
"""
        returns a radial integral from 0 up to the maximum possible radius divided in nRadii steps
        integration method given by the "method" keyword, default 'planar,simpson'
        raster,planar,trapezoidal :
            raster over the rows of the grd map forcing to zero any sample outside the wanted ring
        planar,trapezoidal :
            uses direct trapezoidal integration
        planar,simpson :
            uses direct simpson integration
        some tests show that the difference between planar,simpson and planar,trapezoidal is orders of magnitude larger
        than the difference between planar,trapezoidal and raster,planar,trapezoidal,
        so the default is planar,simpson
        """
import numpy as np
if returnJustIntegral :
mm=self.circularMask()*self[arg] if type(arg) == type('') else self.circularMask()*arg
            return self.simpson2d(mm) if method=='planar,simpson' else self.trapz2d(mm)
oradius=np.arange(nRadii)/float(nRadii-1)*self.maximumRadius()
oradius[nRadii-1]=self.maximumRadius()
_r=self.radius()
mm=self[arg] if type(arg) == type('') else arg
Itheta=np.zeros(len(oradius))
#
if method=='raster,planar,trapezoidal' :
for j in range(len(oradius)) :
u=mm*1
u[np.where(_r>oradius[j])]=0.
acc=np.zeros(u.shape[0])
for r in range(u.shape[0]) :
x=u[r,1:]
acc[r]=((x[1:]+x[0:-1])/2.).sum()
Itheta[j]=(acc[1:]+acc[0:-1]).sum()/2.
Itheta*=self.unitPixelArea()
#
elif method=='planar,trapezoidal' :
for j in range(len(oradius)) :
Itheta[j]=self.trapz2d(mm,outerCut=oradius[j])
#
elif method=='planar,simpson' :
for j in range(len(oradius)) :
Itheta[j]=self.simpson2d(mm,outerCut=oradius[j])
#
else :
print "Unknown integration method %s"%method
return None
#
        if asStruct : return {'colat':(oradius[1:]+oradius[0:-1])/2.,'dIdcolat':Itheta[1:]-Itheta[0:-1],'Icolat':Itheta,'method':method}  # bin-centre radii; oradius is 1d so mean(axis=1) would fail
return oradius,Itheta[1:]-Itheta[0:-1],Itheta,method
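# Usage sketch for radialIntegral (illustrative only; `m` is a placeholder instance
# of the class above and 'power' is one of the maps created when reading a grd file):
#   radii, dI, I, method = m.radialIntegral('power', method='planar,simpson', nRadii=51)
# returns the integration radii, the per-shell increments dI, the cumulative integral I
# and the method string; with asStruct=True the same data come back as a dict keyed by
# 'colat', 'dIdcolat', 'Icolat' and 'method'.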
if __name__=='__main__' :
    import numpy as np
    import sys  # used by sys.exit() below
print "load"
h={}
h['30']=GraspMap('mb/FB_LFI27_SWE_X_FM_1-0.cut',12)
h['27']=GraspMap('outband/FB_LFI27_SWE_X_FM_1-0_27.cut',0)
h['33']=GraspMap('outband/FB_LFI27_SWE_X_FM_1-0_33.cut',0)
lambda0=1/30.
lambda1=1/27.
WL=np.array([1/27.,1/30.])
deltaWL=WL[1]-WL[0]
print "parameters"
PARS={}
for k in ['r1','i1','r2','i2'] :
A=h['27'].M[k]*1
B=h['30'].M[k]*1
A.shape=A.shape[0]*A.shape[1]
B.shape=B.shape[0]*B.shape[1]
# PARS[k]=np.polyfit(WL,np.array([A,B]),1)
PARS[k]=np.array([(B-A)/deltaWL,A])
C=h['33'].M['power']*1
C.shape=C.shape[0]*C.shape[1]
print "lambda interpolate"
ipt = {}
for nu in [27,28,29,30,31,32,33] :
ipt[str(nu)] ={}
for k in ['r1','i1','r2','i2'] :
#ipt[str(nu)][k]=np.polyval(PARS[k],1/float(nu))
ipt[str(nu)][k]=PARS[k][0]*(1/float(nu)-WL[0])+PARS[k][1]
ipt[str(nu)]['power']=ipt[str(nu)]['r1']**2+ipt[str(nu)]['i1']**2+ipt[str(nu)]['r2']**2+ipt[str(nu)]['i2']**2
print nu,1/float(nu),10*np.log10(ipt[str(nu)]['power'].max())
print 10*np.log10(C.max())
R=h['27'].M['power']*1 ; R.shape=R.shape[0]*R.shape[1]
G=h['30'].M['power']*1 ; G.shape=G.shape[0]*G.shape[1]
B=h['33'].M['power']*1 ; B.shape=B.shape[0]*B.shape[1]
I=np.arange(len(R))*3
sys.exit()
from matplotlib import pyplot as plt
plt.close('all')
plt.figure()
plt.plot(I-3./(33.-27.),10*np.log10(R),'r.')
plt.plot(I+0./(33.-27.),10*np.log10(G),'g.')
plt.plot(I+3./(33.-27.),10*np.log10(B),'b.')
plt.show()
| gpl-2.0 |
huobaowangxi/scikit-learn | sklearn/cluster/tests/test_k_means.py | 132 | 25860 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| bsd-3-clause |
amacd31/bom_data_parser | setup.py | 1 | 1476 | import os
from io import open
import versioneer
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = ''.join([
line for line in f.readlines() if 'travis-ci' not in line
])
setup(
name='bom_data_parser',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Basic library for parsing data formats supplied by the Australian Bureau of Meteorology.',
long_description=long_description,
author='Andrew MacDonald',
author_email='[email protected]',
license='BSD',
url='https://github.com/amacd31/bom_data_parser',
install_requires= [
'numpy',
'pandas',
'beautifulsoup4',
'lxml',
],
packages = ['bom_data_parser'],
test_suite = 'nose.collector',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| bsd-3-clause |
thanhquanky/dejavu | run_tests.py | 16 | 6040 | from dejavu.testing import *
from dejavu import Dejavu
from optparse import OptionParser
import matplotlib.pyplot as plt
import time
import shutil
usage = "usage: %prog [options] TESTING_AUDIOFOLDER"
parser = OptionParser(usage=usage, version="%prog 1.1")
parser.add_option("--secs",
action="store",
dest="secs",
default=5,
type=int,
help='Number of seconds starting from zero to test')
parser.add_option("--results",
action="store",
dest="results_folder",
default="./dejavu_test_results",
help='Sets the path where the results are saved')
parser.add_option("--temp",
action="store",
dest="temp_folder",
default="./dejavu_temp_testing_files",
help='Sets the path where the temp files are saved')
parser.add_option("--log",
action="store_true",
dest="log",
default=True,
help='Enables logging')
parser.add_option("--silent",
action="store_false",
dest="silent",
default=False,
help='Disables printing')
parser.add_option("--log-file",
dest="log_file",
default="results-compare.log",
help='Set the path and filename of the log file')
parser.add_option("--padding",
action="store",
dest="padding",
default=10,
type=int,
help='Number of seconds to pad choice of place to test from')
parser.add_option("--seed",
action="store",
dest="seed",
default=None,
type=int,
help='Random seed')
options, args = parser.parse_args()
test_folder = args[0]
# set random seed if set by user
set_seed(options.seed)
# ensure results folder exists
try:
os.stat(options.results_folder)
except:
os.mkdir(options.results_folder)
# set logging
if options.log:
logging.basicConfig(filename=options.log_file, level=logging.DEBUG)
# set test seconds
test_seconds = ['%dsec' % i for i in range(1, options.secs + 1, 1)]
# generate testing files
for i in range(1, options.secs + 1, 1):
generate_test_files(test_folder, options.temp_folder,
i, padding=options.padding)
# scan files
log_msg("Running Dejavu fingerprinter on files in %s..." % test_folder,
log=options.log, silent=options.silent)
tm = time.time()
djv = DejavuTest(options.temp_folder, test_seconds)
log_msg("finished obtaining results from dejavu in %s" % (time.time() - tm),
log=options.log, silent=options.silent)
tests = 1 # djv
n_secs = len(test_seconds)
# set result variables -> 3-level nested lists indexed as [seconds][category or file][test tool]
all_match_counter = [[[0 for x in xrange(tests)] for x in xrange(3)] for x in xrange(n_secs)]
all_matching_times_counter = [[[0 for x in xrange(tests)] for x in xrange(2)] for x in xrange(n_secs)]
all_query_duration = [[[0 for x in xrange(tests)] for x in xrange(djv.n_lines)] for x in xrange(n_secs)]
all_match_confidence = [[[0 for x in xrange(tests)] for x in xrange(djv.n_lines)] for x in xrange(n_secs)]
# group results by seconds
for line in range(0, djv.n_lines):
for col in range(0, djv.n_columns):
# for dejavu
all_query_duration[col][line][0] = djv.result_query_duration[line][col]
all_match_confidence[col][line][0] = djv.result_match_confidence[line][col]
djv_match_result = djv.result_match[line][col]
if djv_match_result == 'yes':
all_match_counter[col][0][0] += 1
elif djv_match_result == 'no':
all_match_counter[col][1][0] += 1
else:
all_match_counter[col][2][0] += 1
djv_match_acc = djv.result_matching_times[line][col]
if djv_match_acc == 0 and djv_match_result == 'yes':
all_matching_times_counter[col][0][0] += 1
elif djv_match_acc != 0:
all_matching_times_counter[col][1][0] += 1
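# Summary of the counters filled above:
#   all_match_counter[sec]          -> [yes, no, invalid] match counts for that clip length
#   all_matching_times_counter[sec] -> [matching time correct, matching time wrong] counts
#   all_query_duration / all_match_confidence keep the raw per-file values for the plots below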
# create plots
djv.create_plots('Confidence', all_match_confidence, options.results_folder)
djv.create_plots('Query duration', all_query_duration, options.results_folder)
for sec in range(0, n_secs):
ind = np.arange(3) #
width = 0.25 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-1 * width, 2.75])
means_dvj = [round(x[0] * 100 / djv.n_lines, 1) for x in all_match_counter[sec]]
rects1 = ax.bar(ind, means_dvj, width, color='r')
    # add some text for labels, title and ticks
ax.set_ylabel('Matching Percentage')
ax.set_title('%s Matching Percentage' % test_seconds[sec])
ax.set_xticks(ind + width)
labels = ['yes','no','invalid']
ax.set_xticklabels( labels )
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
#ax.legend((rects1[0]), ('Dejavu'), loc='center left', bbox_to_anchor=(1, 0.5))
autolabeldoubles(rects1,ax)
plt.grid()
fig_name = os.path.join(options.results_folder, "matching_perc_%s.png" % test_seconds[sec])
fig.savefig(fig_name)
for sec in range(0, n_secs):
ind = np.arange(2) #
width = 0.25 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-1*width, 1.75])
div = all_match_counter[sec][0][0]
if div == 0 :
div = 1000000
means_dvj = [round(x[0] * 100 / div, 1) for x in all_matching_times_counter[sec]]
rects1 = ax.bar(ind, means_dvj, width, color='r')
    # add some text for labels, title and ticks
ax.set_ylabel('Matching Accuracy')
ax.set_title('%s Matching Times Accuracy' % test_seconds[sec])
ax.set_xticks(ind + width)
labels = ['yes','no']
ax.set_xticklabels( labels )
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
#ax.legend( (rects1[0]), ('Dejavu'), loc='center left', bbox_to_anchor=(1, 0.5))
autolabeldoubles(rects1,ax)
plt.grid()
fig_name = os.path.join(options.results_folder, "matching_acc_%s.png" % test_seconds[sec])
fig.savefig(fig_name)
# remove temporary folder
shutil.rmtree(options.temp_folder)
| mit |
mstritt/orbit-image-analysis | src/main/python/deeplearn/model.py | 1 | 16297 | from datetime import datetime
import os
import sys
import time
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from network import *
from utils import ImageReader, decode_labels, inv_preprocess, prepare_label, write_log
"""
This script trains or evaluates the model on augmented PASCAL VOC 2012 dataset.
The training set contains 10581 training images.
The validation set contains 1449 validation images.
Training:
'poly' learning rate
different learning rates for different layers
"""
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
class Model(object):
def __init__(self, sess, conf):
self.sess = sess
self.conf = conf
# train
def train(self):
self.train_setup()
self.sess.run(tf.global_variables_initializer())
# Load the pre-trained model if provided
if self.conf.pretrain_file is not None:
self.load(self.loader, self.conf.pretrain_file)
# Start queue threads.
threads = tf.train.start_queue_runners(coord=self.coord, sess=self.sess)
# log_var for tensorboard
log_var = tf.Variable(0.0)
summary_loss = [tf.summary.scalar("loss", log_var)]
write_op = tf.summary.merge(summary_loss)
#write_op = tf.summary.merge_all()
#self.sess.run(tf.global_variables_initializer())
# Train!
for step in range(self.conf.num_steps+1):
start_time = time.time()
feed_dict = { self.curr_step : step }
if step % self.conf.save_interval == 0:
loss_value, images, labels, preds, summary, _ = self.sess.run(
[self.reduced_loss,
self.image_batch,
self.label_batch,
self.pred,
self.total_summary,
self.train_op],
feed_dict=feed_dict)
self.summary_writer.add_summary(summary, step)
self.save(self.saver, step)
else:
loss_value, _ = self.sess.run([self.reduced_loss, self.train_op],
feed_dict=feed_dict)
duration = time.time() - start_time
print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))
write_log('{:d}, {:.3f}'.format(step, loss_value), self.conf.logfile)
#write logs for tensorboard
summary2 = self.sess.run(write_op, {log_var: loss_value})
self.summary_writer.add_summary(summary2, step)
#self.summary_writer.flush()
# finish
self.coord.request_stop()
self.coord.join(threads)
# evaluate
def test(self):
self.test_setup()
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
# load checkpoint
checkpointfile = self.conf.modeldir+ '/model.ckpt-' + str(self.conf.valid_step)
self.load(self.loader, checkpointfile)
# Start queue threads.
threads = tf.train.start_queue_runners(coord=self.coord, sess=self.sess)
areasOverlap = np.zeros(self.conf.valid_num_steps, np.float32)
areasPredicted = np.zeros(self.conf.valid_num_steps, np.float32)
areasGtObj = np.zeros(self.conf.valid_num_steps, np.float32)
# Test!
for step in range(self.conf.valid_num_steps):
preds, _, _, areaOverlap, areaGTObj, areaPredicted, conv_out, conv_weights = self.sess.run([self.pred, self.accu_update_op, self.mIou_update_op, self.areaOverlap, self.areaGTObj, self.areaPredicted, tf.get_collection('conv_output'), tf.get_collection('conv_weights')])
            if self.conf.create_plots:
                # save images of the convolution outputs and weights for this step
                for i in range(len(conv_out)):
                    plot_name = 'step_{}_conv_{}'.format(step, i)
                    print('create plot ' + plot_name)
                    self.plot_conv_output(conv_out[i], plot_name)
                for i in range(len(conv_weights)):
                    plot_name = 'step_{}_weights_{}'.format(step, i)
                    print('create plot ' + plot_name)
                    self.plot_conv_weights(conv_weights[i], plot_name)
areasOverlap[step] = areaOverlap
areasGtObj[step] = areaGTObj
areasPredicted[step] = areaPredicted
print('step {:d}'.format(step))
print('\tareaOverlap: {:.0f}'.format(areaOverlap))
print('\tareaGTObj: {:.0f}'.format(areaGTObj))
print('\tareaPredicted: {:.0f}'.format(areaPredicted))
print('Pixel Accuracy: {:.3f}'.format(self.accu.eval(session=self.sess)))
print('Mean IoU: {:.3f}'.format(self.mIoU.eval(session=self.sess)))
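        # Detection counts below: a frame is a true positive when at least half of its
        # ground-truth area is recovered (areaOverlap/areaGTObj >= 0.5) and a false
        # positive when less than half is recovered; frames with an empty ground truth
        # count as tn when nothing is predicted and as fn otherwise (as coded below).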
tp = ((areasOverlap[areasGtObj>0.0] / areasGtObj[areasGtObj>0]) >= 0.5).sum()
fp = ((areasOverlap[areasGtObj>0.0] / areasGtObj[areasGtObj>0]) < 0.5).sum()
tn = (areasGtObj[areasPredicted <= 0] <= 0).sum()
fn = (areasGtObj[areasPredicted > 0] <= 0).sum()
print('tp', tp, 'fp', fp, 'tn', tn, 'fn', fn)
        precision = float(tp)/(tp + fp)
        recall = float(tp)/(tp + fn)
        score = (2*precision*recall)/(precision+recall)
print('precision', precision, 'recall', recall, 'score', score)
self.coord.request_stop()
self.coord.join(threads)
def train_setup(self):
tf.set_random_seed(self.conf.random_seed)
# Create queue coordinator.
self.coord = tf.train.Coordinator()
# Input size
input_size = (self.conf.input_height, self.conf.input_width)
# Load reader
with tf.name_scope("create_inputs"):
reader = ImageReader(
self.conf.data_dir,
self.conf.data_list,
input_size,
self.conf.random_scale,
self.conf.random_mirror,
self.conf.ignore_label,
self.coord)
self.image_batch, self.label_batch = reader.dequeue(self.conf.batch_size)
self.image_batch = tf.identity( self.image_batch, name='input_batch' )
self.image_batch -= IMG_MEAN
# Create network
if self.conf.encoder_name not in ['res101', 'res50', 'deeplab']:
print('encoder_name ERROR!')
print("Please input: res101, res50, or deeplab")
sys.exit(-1)
elif self.conf.encoder_name == 'deeplab':
net = Deeplab_v2(self.image_batch, self.conf.num_classes, True)
# Variables that load from pre-trained model.
restore_var = [v for v in tf.global_variables() if 'fc' not in v.name]
# Trainable Variables
all_trainable = tf.trainable_variables()
# Fine-tune part
encoder_trainable = [v for v in all_trainable if 'fc' not in v.name] # lr * 1.0
# Decoder part
decoder_trainable = [v for v in all_trainable if 'fc' in v.name]
else:
net = ResNet_segmentation(self.image_batch, self.conf.num_classes, True, self.conf.encoder_name)
# Variables that load from pre-trained model.
restore_var = [v for v in tf.global_variables() if 'resnet_v1' in v.name]
# Trainable Variables
all_trainable = tf.trainable_variables()
# Fine-tune part
encoder_trainable = [v for v in all_trainable if 'resnet_v1' in v.name] # lr * 1.0
# Decoder part
decoder_trainable = [v for v in all_trainable if 'decoder' in v.name]
decoder_w_trainable = [v for v in decoder_trainable if 'weights' in v.name or 'gamma' in v.name] # lr * 10.0
decoder_b_trainable = [v for v in decoder_trainable if 'biases' in v.name or 'beta' in v.name] # lr * 20.0
# Check
assert(len(all_trainable) == len(decoder_trainable) + len(encoder_trainable))
assert(len(decoder_trainable) == len(decoder_w_trainable) + len(decoder_b_trainable))
# Network raw output
raw_output = net.outputs # [batch_size, h, w, 21]
# Output size
output_shape = tf.shape(raw_output)
output_size = (output_shape[1], output_shape[2])
# Groud Truth: ignoring all labels greater or equal than n_classes
label_proc = prepare_label(self.label_batch, output_size, num_classes=self.conf.num_classes, one_hot=False)
raw_gt = tf.reshape(label_proc, [-1,])
indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, self.conf.num_classes - 1)), 1)
gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
raw_prediction = tf.reshape(raw_output, [-1, self.conf.num_classes])
prediction = tf.gather(raw_prediction, indices)
# Pixel-wise softmax_cross_entropy loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
# L2 regularization
l2_losses = [self.conf.weight_decay * tf.nn.l2_loss(v) for v in all_trainable if 'weights' in v.name]
# Loss function
self.reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)
# Define optimizers
# 'poly' learning rate
base_lr = tf.constant(self.conf.learning_rate)
self.curr_step = tf.placeholder(dtype=tf.float32, shape=())
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - self.curr_step / self.conf.num_steps), self.conf.power))
# We have several optimizers here in order to handle the different lr_mult
# which is a kind of parameters in Caffe. This controls the actual lr for each
# layer.
opt_encoder = tf.train.MomentumOptimizer(learning_rate, self.conf.momentum)
opt_decoder_w = tf.train.MomentumOptimizer(learning_rate * 10.0, self.conf.momentum)
opt_decoder_b = tf.train.MomentumOptimizer(learning_rate * 20.0, self.conf.momentum)
# To make sure each layer gets updated by different lr's, we do not use 'minimize' here.
# Instead, we separate the steps compute_grads+update_params.
# Compute grads
grads = tf.gradients(self.reduced_loss, encoder_trainable + decoder_w_trainable + decoder_b_trainable)
grads_encoder = grads[:len(encoder_trainable)]
grads_decoder_w = grads[len(encoder_trainable) : (len(encoder_trainable) + len(decoder_w_trainable))]
grads_decoder_b = grads[(len(encoder_trainable) + len(decoder_w_trainable)):]
# Update params
train_op_conv = opt_encoder.apply_gradients(zip(grads_encoder, encoder_trainable))
train_op_fc_w = opt_decoder_w.apply_gradients(zip(grads_decoder_w, decoder_w_trainable))
train_op_fc_b = opt_decoder_b.apply_gradients(zip(grads_decoder_b, decoder_b_trainable))
# Finally, get the train_op!
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # for collecting moving_mean and moving_variance
with tf.control_dependencies(update_ops):
self.train_op = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)
# Saver for storing checkpoints of the model
self.saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=0)
# Loader for loading the pre-trained model
self.loader = tf.train.Saver(var_list=restore_var)
# Training summary
# Processed predictions: for visualisation.
raw_output_up = tf.image.resize_bilinear(raw_output, input_size)
raw_output_up = tf.argmax(raw_output_up, axis=3)
self.pred = tf.expand_dims(raw_output_up, dim=3)
# Image summary.
images_summary = tf.py_func(inv_preprocess, [self.image_batch, 2, IMG_MEAN], tf.uint8)
labels_summary = tf.py_func(decode_labels, [self.label_batch, 2, self.conf.num_classes], tf.uint8)
preds_summary = tf.py_func(decode_labels, [self.pred, 2, self.conf.num_classes], tf.uint8)
self.total_summary = tf.summary.image('images',
tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]),
max_outputs=2) # Concatenate row-wise.
if not os.path.exists(self.conf.logdir):
os.makedirs(self.conf.logdir)
self.summary_writer = tf.summary.FileWriter(self.conf.logdir, graph=tf.get_default_graph())
def test_setup(self):
# Create queue coordinator.
self.coord = tf.train.Coordinator()
# Load reader
with tf.name_scope("create_inputs"):
reader = ImageReader(
self.conf.data_dir,
self.conf.valid_data_list,
None, # the images have different sizes
False, # no data-aug
False, # no data-aug
self.conf.ignore_label,
self.coord)
image, label = reader.image, reader.label # [h, w, 3 or 1]
# Add one batch dimension [1, h, w, 3 or 1]
self.image_batch, self.label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0)
self.image_batch = tf.identity( self.image_batch, name='image_batch')
self.image_batch -= IMG_MEAN
# Create network
if self.conf.encoder_name not in ['res101', 'res50', 'deeplab']:
print('encoder_name ERROR!')
print("Please input: res101, res50, or deeplab")
sys.exit(-1)
elif self.conf.encoder_name == 'deeplab':
net = Deeplab_v2(self.image_batch, self.conf.num_classes, False)
else:
net = ResNet_segmentation(self.image_batch, self.conf.num_classes, False, self.conf.encoder_name)
# predictions
raw_output = net.outputs
raw_output = tf.image.resize_bilinear(raw_output, tf.shape(self.image_batch)[1:3,])
raw_output = tf.argmax(raw_output, axis=3)
pred = tf.expand_dims(raw_output, dim=3)
self.pred = tf.reshape(pred, [-1,], name="predictions")
# labels
gt = tf.reshape(self.label_batch, [-1,])
# Ignoring all labels greater than or equal to n_classes.
temp = tf.less_equal(gt, self.conf.num_classes - 1)
weights = tf.cast(temp, tf.int32)
# fix for tf 1.3.0
gt = tf.where(temp, gt, tf.cast(temp, tf.uint8))
# Pixel accuracy
self.accu, self.accu_update_op = tf.contrib.metrics.streaming_accuracy(
self.pred, gt, weights=weights)
# mIoU
self.mIoU, self.mIou_update_op = tf.contrib.metrics.streaming_mean_iou(
self.pred, gt, num_classes=self.conf.num_classes, weights=weights)
# f1 score
pred = tf.cast(self.pred, tf.int32)
gt = tf.cast(gt, tf.int32)
self.areaOverlap = tf.count_nonzero(pred * gt)
self.areaGTObj = tf.count_nonzero(gt)
self.areaPredicted = tf.count_nonzero(pred)
# Loader for loading the checkpoint
self.loader = tf.train.Saver(var_list=tf.global_variables())
def save(self, saver, step):
'''
Save weights.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(self.conf.modeldir, model_name)
if not os.path.exists(self.conf.modeldir):
os.makedirs(self.conf.modeldir)
saver.save(self.sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(self, saver, filename):
'''
Load trained weights.
'''
saver.restore(self.sess, filename)
print("Restored model parameters from {}".format(filename))
def plot_conv_weights(self, weights, name, channels_all=True):
plot_dir = os.path.join('./plots', 'conv_weights')
plot_dir = os.path.join(plot_dir, name)
# create directory if does not exist, otherwise empty it
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
w_min = np.min(weights)
w_max = np.max(weights)
channels = [0]
if channels_all:
channels = range(weights.shape[2])
num_filters = weights.shape[3]
grid_r, grid_c = self.get_grid_dim(num_filters)
fig, axes = plt.subplots(min([grid_r, grid_c]), max([grid_r, grid_c]))
# iterate channels
for channel in channels:
# iterate filters inside every channel
for l, ax in enumerate(axes.flat):
# get a single filter
img = weights[:, :, channel, l]
# put it on the grid
ax.imshow(img, vmin=w_min, vmax=w_max, interpolation='nearest', cmap='seismic')
# remove any labels from the axes
ax.set_xticks([])
ax.set_yticks([])
plt.savefig(os.path.join(plot_dir, '{}-{}.png'.format(name, channel)), bbox_inches='tight')
plt.close(fig)
def plot_conv_output(self, conv_img, name):
# make path to output folder
plot_dir = os.path.join('./plots', 'conv_output')
plot_dir = os.path.join(plot_dir, name)
# create directory if does not exist, otherwise empty it
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
w_min = np.min(conv_img)
w_max = np.max(conv_img)
# get number of convolutional filters
num_filters = conv_img.shape[3]
# get number of grid rows and columns
grid_r, grid_c = self.get_grid_dim(num_filters)
# create figure and axes
fig, axes = plt.subplots(min([grid_r, grid_c]), max([grid_r, grid_c]), figsize=(150, 150))
# iterate filters
for l, ax in enumerate(axes.flat):
# get a single image
img = conv_img[0, :, :, l]
# put it on the grid
ax.imshow(img, vmin=w_min, vmax=w_max, interpolation='bicubic', cmap='Greys')
# remove any labels from the axes
ax.set_xticks([])
ax.set_yticks([])
# save figure
plt.savefig(os.path.join(plot_dir, '{}.png'.format(name)), bbox_inches='tight')
plt.close(fig)
def get_grid_dim(self, x):
factors = self.prime_powers(x)
if len(factors) % 2 == 0:
i = int(len(factors) / 2)
return factors[i], factors[i - 1]
i = len(factors) // 2
return factors[i], factors[i]
def prime_powers(self, n):
factors = set()
for x in range(1, int(math.sqrt(n)) + 1):
if n % x == 0:
factors.add(int(x))
factors.add(int(n // x))
return sorted(factors)
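    # Worked example: prime_powers(12) returns the sorted divisors [1, 2, 3, 4, 6, 12]
    # (despite its name it lists all divisors, not prime powers), and get_grid_dim(12)
    # then picks the middle pair, giving a 4 x 3 plotting grid.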
| gpl-3.0 |
efabless/openlane | scripts/compare_regression_reports.py | 1 | 15876 | # Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import subprocess
import csv
import pandas as pd
from collections import OrderedDict
import xlsxwriter
parser = argparse.ArgumentParser(
description="Compare benchmark csv to a regression results csv")
parser.add_argument('--benchmark', '-b', action='store', required=True,
help="The csv file from which to extract the benchmark results")
parser.add_argument('--regression_results', '-r', action='store', required=True,
help="The csv file to be tested")
parser.add_argument('--use_regression_results_as_design_list_source', '-ur', action='store_true', default=False,
help="uses the regression results instead of the benchmark as the source of the designs list")
parser.add_argument('--output_report', '-o', action='store', required=True,
help="The file to print the final report in")
parser.add_argument('--output_xlsx', '-x', action='store', required=True,
help="The csv file to print a merged csv file benchmark vs regression_script in")
args = parser.parse_args()
benchmark_file = args.benchmark
regression_results_file = args.regression_results
output_report_file = args.output_report
output_xlsx_file = args.output_xlsx
regression_results_as_design_list_source=args.use_regression_results_as_design_list_source
benchmark =dict()
regression_results =dict()
output_report_list = []
testFail = False
configuration_mismatches = []
critical_mismatches = []
missing_configs = []
base_configs = ['CLOCK_PERIOD', 'SYNTH_STRATEGY', 'SYNTH_MAX_FANOUT','FP_CORE_UTIL', 'FP_ASPECT_RATIO',
'FP_PDN_VPITCH', 'FP_PDN_HPITCH', 'PL_TARGET_DENSITY', 'GLB_RT_ADJUSTMENT', 'STD_CELL_LIBRARY', 'CELL_PAD', 'DIODE_INSERTION_STRATEGY']
tolerance = {'general_tolerance':1, 'tritonRoute_violations':2, 'Magic_violations':10, 'antenna_violations':10,'lvs_total_errors':0}
critical_statistics = ['tritonRoute_violations','Magic_violations', 'antenna_violations','lvs_total_errors']
note_worthy_statistics = []
ignore_list = ['', 'design', 'design_name', 'config']
def compare_vals(benchmark_value, regression_value,param):
if str(benchmark_value) == "-1":
return True
if str(regression_value) == "-1":
return False
tol = 0-tolerance['general_tolerance']
if param in tolerance.keys():
tol = 0-tolerance[param]
if float(benchmark_value) - float(regression_value) >= tol:
return True
else:
return False
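# compare_vals treats a benchmark value of -1 as "no reference" (always passes) and a
# regression value of -1 as a failure; otherwise the regression value may exceed the
# benchmark by at most the per-metric tolerance, e.g. with tolerance['Magic_violations']
# = 10 a benchmark of 5 accepts regression values up to 15.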
def findIdx(header, label):
for idx in range(len(header)):
if label == header[idx]:
return idx
else:
return -1
def diff_list(l1, l2):
return [x for x in l1 if x not in l2]
def parseCSV(csv_file, isBenchmark):
global note_worthy_statistics
map_out = dict()
csvOpener = open(csv_file, 'r')
csvData = csvOpener.read().split("\n")
headerInfo = csvData[0].split(",")
designNameIdx = findIdx(headerInfo, "design")
if isBenchmark:
note_worthy_statistics=diff_list(diff_list(diff_list(headerInfo,ignore_list),critical_statistics),base_configs)
remover = 0
size = len(base_configs)
while remover < size:
if base_configs[remover] not in headerInfo:
missing_configs.append("\nThis configuration "+base_configs[remover]+" doesn't exist in the sheets.")
base_configs.pop(remover)
remover -= 1
size -= 1
remover += 1
if designNameIdx == -1:
print("invalid report. No design names.")
exit(-1)
for i in range(1, len(csvData)):
if len(csvData[i]):
entry = csvData[i].split(",")
designName=entry[designNameIdx]
for idx in range(len(headerInfo)):
if idx != designNameIdx:
if designName not in map_out.keys():
map_out[designName] = dict()
if isBenchmark:
map_out[designName]["Status"] = "PASSED"
else:
map_out[designName]["Status"] = "----"
map_out[designName][headerInfo[idx]] = str(entry[idx])
return map_out
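# parseCSV returns {design_name: {column_name: value, ..., 'Status': 'PASSED' or '----'}};
# benchmark entries start at 'PASSED' and may later be downgraded to 'FAIL' or 'NOT FOUND'.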
def configurationMismatch(benchmark, regression_results):
global configuration_mismatches
designList = list()
if regression_results_as_design_list_source:
designList = regression_results.keys()
else:
designList = benchmark.keys()
for design in designList:
output_report_list.append("\nComparing Configurations for: "+ design+"\n")
configuration_mismatches.append("\nComparing Configurations for: "+ design+"\n")
if design not in regression_results:
output_report_list.append("\tDesign "+ design+" Not Found in the provided regression sheet\n")
configuration_mismatches.append("\tDesign "+ design+" Not Found in the provided regression sheet\n")
continue
if design not in benchmark:
output_report_list.append("\tDesign "+ design+" Not Found in the provided benchmark sheet\n")
configuration_mismatches.append("\tDesign "+ design+" Not Found in the provided benchmark sheet\n")
continue
size_before = len(configuration_mismatches)
for config in base_configs:
if benchmark[design][config] == regression_results[design][config]:
output_report_list.append("\tConfiguration "+ config+" MATCH\n")
output_report_list.append("\t\tConfiguration "+ config+" value: "+ benchmark[design][config] +"\n")
else:
configuration_mismatches.append("\tConfiguration "+ config+" MISMATCH\n")
output_report_list.append("\tConfiguration "+ config+" MISMATCH\n")
configuration_mismatches.append("\t\tDesign "+ design + " Configuration "+ config+" BENCHMARK value: "+ benchmark[design][config] +"\n")
output_report_list.append("\t\tDesign "+ design + " Configuration "+ config+" BENCHMARK value: "+ benchmark[design][config] +"\n")
configuration_mismatches.append("\t\tDesign "+ design + " Configuration "+ config+" USER value: "+ regression_results[design][config] +"\n")
output_report_list.append("\t\tDesign "+ design + " Configuration "+ config+" USER value: "+ regression_results[design][config] +"\n")
if size_before == len(configuration_mismatches):
configuration_mismatches=configuration_mismatches[:-1]
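# Note: configurationMismatch() above and criticalMistmatch() below append a
# per-design header line to their report lists up front; if no mismatch gets
# recorded for that design (the list did not grow past size_before), the
# trailing header line is dropped again.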
def criticalMistmatch(benchmark, regression_results):
global testFail
global critical_mismatches
designList = list()
if regression_results_as_design_list_source:
designList = regression_results.keys()
else:
designList = benchmark.keys()
for design in designList:
output_report_list.append("\nComparing Critical Statistics for: "+ design+"\n")
critical_mismatches.append("\nComparing Critical Statistics for: "+ design+"\n")
if design not in regression_results:
testFail = True
benchmark[design]["Status"] = "NOT FOUND"
output_report_list.append("\tDesign "+ design+" Not Found in the provided regression sheet\n")
critical_mismatches.append("\tDesign "+ design+" Not Found in the provided regression sheet\n")
continue
if design not in benchmark:
testFail = False
output_report_list.append("\tDesign "+ design+" Not Found in the provided benchmark sheet\n")
critical_mismatches.append("\tDesign "+ design+" Not Found in the provided benchmark sheet\n")
continue
size_before = len(critical_mismatches)
for stat in critical_statistics:
if compare_vals(benchmark[design][stat],regression_results[design][stat],stat):
output_report_list.append("\tStatistic "+ stat+" MATCH\n")
output_report_list.append("\t\tStatistic "+ stat+" value: "+ benchmark[design][stat] +"\n")
else:
testFail = True
benchmark[design]["Status"] = "FAIL"
critical_mismatches.append("\tStatistic "+ stat+" MISMATCH\n")
output_report_list.append("\tStatistic "+ stat+" MISMATCH\n")
critical_mismatches.append("\t\tDesign "+ design + " Statistic "+ stat+" BENCHMARK value: "+ benchmark[design][stat] +"\n")
output_report_list.append("\t\tDesign "+ design + " Statistic "+ stat+" BENCHMARK value: "+ benchmark[design][stat] +"\n")
critical_mismatches.append("\t\tDesign "+ design + " Statistic "+ stat+" USER value: "+ regression_results[design][stat] +"\n")
output_report_list.append("\t\tDesign "+ design + " Statistic "+ stat+" USER value: "+ regression_results[design][stat] +"\n")
if len(critical_mismatches) == size_before:
critical_mismatches= critical_mismatches[:-1]
def noteWorthyMismatch(benchmark, regression_results):
designList = list()
if regression_results_as_design_list_source:
designList = regression_results.keys()
else:
designList = benchmark.keys()
for design in designList:
output_report_list.append("\nComparing Note Worthy Statistics for: "+ design+"\n")
if design not in regression_results:
output_report_list.append("\tDesign "+ design+" Not Found in the provided regression sheet\n")
continue
if design not in benchmark:
output_report_list.append("\tDesign "+ design+" Not Found in the provided benchmark sheet\n")
continue
for stat in note_worthy_statistics:
if benchmark[design][stat] == regression_results[design][stat] or benchmark[design][stat] == "-1":
output_report_list.append("\tStatistic "+ stat+" MATCH\n")
output_report_list.append("\t\tStatistic "+ stat+" value: "+ benchmark[design][stat] +"\n")
else:
output_report_list.append("\tStatistic "+ stat+" MISMATCH\n")
output_report_list.append("\t\tDesign "+ design + " Statistic "+ stat+" BENCHMARK value: "+ benchmark[design][stat] +"\n")
output_report_list.append("\t\tDesign "+ design + " Statistic "+ stat+" USER value: "+ regression_results[design][stat] +"\n")
benchmark = parseCSV(benchmark_file,1)
regression_results = parseCSV(regression_results_file,0)
configurationMismatch(benchmark,regression_results)
criticalMistmatch(benchmark,regression_results)
noteWorthyMismatch(benchmark, regression_results)
report = ""
if testFail:
report = "TEST FAILED\n"
else:
report = "TEST PASSED\n"
if len(missing_configs):
    report += "\nThese configurations are missing:\n"
report += "".join(missing_configs)
if testFail:
    report += "\n\nCritical Mismatches. These are the reasons why the test failed:\n\n"
report += "".join(critical_mismatches)
if testFail:
report += "\n\nConfiguration Mismatches. These are expected to cause differences between the results:\n\n"
report += "".join(configuration_mismatches)
report += "\nThis is the full generated report:\n"
report += "".join(output_report_list)
outputReportOpener = open(output_report_file, 'w')
outputReportOpener.write(report)
outputReportOpener.close()
def formNotFoundStatus(benchmark, regression_results):
for design in benchmark.keys():
if design not in regression_results:
benchmark[design]["Status"] = "NOT FOUND"
formNotFoundStatus(benchmark, regression_results)
# Open an Excel workbook
workbook = xlsxwriter.Workbook(output_xlsx_file)
# Set up a format
fail_format = workbook.add_format(properties={'bold': True, 'font_color': 'red'})
pass_format = workbook.add_format(properties={'bold': True, 'font_color': 'green'})
diff_format = workbook.add_format(properties={'font_color': 'blue'})
header_format = workbook.add_format(properties={'font_color': 'gray'})
benchmark_format = workbook.add_format(properties={'bold': True, 'font_color': 'navy'})
# Create a sheet
worksheet = workbook.add_worksheet('diff')
headerInfo = ['Owner','design', 'Status']
headerInfo.extend(critical_statistics)
headerInfo.extend(note_worthy_statistics)
headerInfo.extend(base_configs)
# Write the headers
for col_num, header in enumerate(headerInfo):
worksheet.write(0,col_num, header,header_format)
# Save the data from the OrderedDict into the excel sheet
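# Each design occupies two consecutive rows in the sheet: the benchmark values
# go on row idx*2+1 and the user's regression results on row idx*2+2, so the
# two runs can be compared cell by cell under the same header row.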
idx = 0
while idx < len(benchmark):
worksheet.write(idx*2+1, 0, "Benchmark", benchmark_format)
worksheet.write(idx*2+2, 0, "User")
design = str(list(benchmark.keys())[idx])
if design not in regression_results:
for col_num, header in enumerate(headerInfo):
if header == 'Owner':
worksheet.write(idx*2+1, col_num, "Benchmark", benchmark_format)
worksheet.write(idx*2+2, col_num, "User")
continue
if header == 'design':
worksheet.write(idx*2+1, col_num, design)
worksheet.write(idx*2+2, col_num, design)
continue
worksheet.write(idx*2+1, col_num, benchmark[design][header])
else:
for col_num, header in enumerate(headerInfo):
if header == 'Owner':
worksheet.write(idx*2+1, col_num, "Benchmark", benchmark_format)
worksheet.write(idx*2+2, col_num, "User")
continue
if header == 'design':
worksheet.write(idx*2+1, col_num, design)
worksheet.write(idx*2+2, col_num, design)
continue
if header == 'Status':
if benchmark[design][header] == "PASSED":
worksheet.write(idx*2+1, col_num, benchmark[design][header],pass_format)
worksheet.write(idx*2+2, col_num, regression_results[design][header],pass_format)
else:
worksheet.write(idx*2+1, col_num, benchmark[design][header],fail_format)
worksheet.write(idx*2+2, col_num, regression_results[design][header],fail_format)
continue
if benchmark[design][header] != regression_results[design][header]:
if header in critical_statistics:
if compare_vals(benchmark[design][header],regression_results[design][header],header) == False:
worksheet.write(idx*2+1, col_num, benchmark[design][header],fail_format)
worksheet.write(idx*2+2, col_num, regression_results[design][header],fail_format)
else:
worksheet.write(idx*2+1, col_num, benchmark[design][header],pass_format)
worksheet.write(idx*2+2, col_num, regression_results[design][header],pass_format)
else:
worksheet.write(idx*2+1, col_num, benchmark[design][header],diff_format)
worksheet.write(idx*2+2, col_num, regression_results[design][header],diff_format)
else:
worksheet.write(idx*2+1, col_num, benchmark[design][header])
worksheet.write(idx*2+2, col_num, regression_results[design][header])
idx+=1
# Close the workbook
workbook.close()
| apache-2.0 |
nicproulx/mne-python | mne/viz/topo.py | 2 | 32096 | """Functions to plot M/EEG data on topo (one axes per channel)."""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
from functools import partial
from itertools import cycle
from copy import deepcopy
import numpy as np
from ..io.constants import Bunch
from ..io.pick import channel_type, pick_types
from ..utils import _clean_names, warn
from ..channels.layout import _merge_grad_data, _pair_grad_sensors, find_layout
from ..defaults import _handle_default
from .utils import (_check_delayed_ssp, COLORS, _draw_proj_checkbox,
add_background_image, plt_show, _setup_vmin_vmax,
DraggableColorbar)
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None):
"""Create iterator over channel positions.
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
    `iter_topography`, hence, allows you to conveniently create custom
    topography plots.
Parameters
----------
info : instance of Info
The measurement info.
layout : instance of mne.layout.Layout | None
The layout to use. If None, layout will be guessed
on_pick : callable | None
The callback function to be invoked on clicking one
of the axes. Is supposed to instantiate the following
API: `function(axis, channel_index)`
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : str | obj
The figure face color. Defaults to black.
axis_facecolor : str | obj
The axis face color. Defaults to black.
axis_spinecolor : str | obj
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
layout_scale: float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
Returns
-------
A generator that can be unpacked into:
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
"""
return _iter_topography(info, layout, on_pick, fig, fig_facecolor,
axis_facecolor, axis_spinecolor, layout_scale)
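# Usage sketch (not part of the original module; assumes ``raw`` is a loaded
# mne.io.Raw instance and matplotlib is available):
#
#     import matplotlib.pyplot as plt
#     from mne.viz import iter_topography
#     for ax, ch_idx in iter_topography(raw.info,
#                                       fig_facecolor='white',
#                                       axis_facecolor='white',
#                                       axis_spinecolor='white'):
#         ax.plot(raw[ch_idx, :][0].ravel())
#     plt.show()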
def _iter_topography(info, layout, on_pick, fig, fig_facecolor='k',
axis_facecolor='k', axis_spinecolor='k',
layout_scale=None, unified=False, img=False):
"""Iterate over topography.
Has the same parameters as iter_topography, plus:
unified : bool
If False (default), multiple matplotlib axes will be used.
If True, a single axis will be constructed. The former is
useful for custom plotting, the latter for speed.
"""
from matplotlib import pyplot as plt, collections
if fig is None:
fig = plt.figure()
fig.set_facecolor(fig_facecolor)
if layout is None:
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
if unified:
under_ax = plt.axes([0, 0, 1, 1])
under_ax.set(xlim=[0, 1], ylim=[0, 1])
under_ax.axis('off')
axs = list()
for idx, name in iter_ch:
ch_idx = ch_names.index(name)
if not unified: # old, slow way
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
plt.setp(list(ax.spines.values()), color=axis_spinecolor)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ax._mne_ch_name = name
ax._mne_ch_idx = ch_idx
ax._mne_ax_face_color = axis_facecolor
yield ax, ch_idx
else:
ax = Bunch(ax=under_ax, pos=pos[idx], data_lines=list(),
_mne_ch_name=name, _mne_ch_idx=ch_idx,
_mne_ax_face_color=axis_facecolor)
axs.append(ax)
if unified:
under_ax._mne_axs = axs
# Create a PolyCollection for the axis backgrounds
verts = np.transpose([pos[:, :2],
pos[:, :2] + pos[:, 2:] * [1, 0],
pos[:, :2] + pos[:, 2:],
pos[:, :2] + pos[:, 2:] * [0, 1],
], [1, 0, 2])
if not img:
under_ax.add_collection(collections.PolyCollection(
verts, facecolor=axis_facecolor, edgecolor=axis_spinecolor,
linewidth=1.)) # Not needed for image plots.
for ax in axs:
yield ax, ax._mne_ch_idx
def _plot_topo(info, times, show_func, click_func=None, layout=None,
vmin=None, vmax=None, ylim=None, colorbar=None,
border='none', axis_facecolor='k', fig_facecolor='k',
cmap='RdBu_r', layout_scale=None, title=None, x_label=None,
y_label=None, font_color='w', unified=False, img=False):
"""Plot on sensor layout."""
import matplotlib.pyplot as plt
if layout.kind == 'custom':
layout = deepcopy(layout)
layout.pos[:, :2] -= layout.pos[:, :2].min(0)
layout.pos[:, :2] /= layout.pos[:, :2].max(0)
# prepare callbacks
tmin, tmax = times[[0, -1]]
click_func = show_func if click_func is None else click_func
on_pick = partial(click_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label, colorbar=colorbar)
fig = plt.figure()
if colorbar:
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg=fig_facecolor)
cb = fig.colorbar(sm, ax=ax)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color=font_color)
ax.axis('off')
my_topo_plot = _iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor,
unified=unified, img=img)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if title is not None:
plt.figtext(0.03, 0.9, title, color=font_color, fontsize=19)
return fig
def _plot_topo_onpick(event, show_func):
"""Onpick callback that shows a single channel in a new figure."""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
if event.inaxes is None or (not hasattr(orig_ax, '_mne_ch_idx') and
not hasattr(orig_ax, '_mne_axs')):
return
import matplotlib.pyplot as plt
try:
if hasattr(orig_ax, '_mne_axs'): # in unified, single-axes mode
x, y = event.xdata, event.ydata
for ax in orig_ax._mne_axs:
if x >= ax.pos[0] and y >= ax.pos[1] and \
x <= ax.pos[0] + ax.pos[2] and \
y <= ax.pos[1] + ax.pos[3]:
orig_ax = ax
break
else:
return
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_axis_bgcolor(face_color)
# allow custom function to override parameters
show_func(ax, ch_idx)
except Exception as err:
# matplotlib silently ignores exceptions in event handlers,
# so we print
# it here to know what went wrong
print(err)
raise
def _compute_scalings(bn, xlim, ylim):
"""Compute scale factors for a unified plot."""
if isinstance(ylim[0], (tuple, list, np.ndarray)):
ylim = (ylim[0][0], ylim[1][0])
pos = bn.pos
bn.x_s = pos[2] / (xlim[1] - xlim[0])
bn.x_t = pos[0] - bn.x_s * xlim[0]
bn.y_s = pos[3] / (ylim[1] - ylim[0])
bn.y_t = pos[1] - bn.y_s * ylim[0]
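# For example (hypothetical numbers): calling _compute_scalings with
# bn.pos == [0.2, 0.5, 0.1, 0.05], xlim == (0., 1.) and ylim == (-5., 5.)
# maps a data point (t, y) to
# (bn.x_t + bn.x_s * t, bn.y_t + bn.y_s * y) == (0.2 + 0.1 * t, 0.525 + 0.005 * y),
# i.e. into the small rectangle reserved for that channel on the unified axes.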
def _check_vlim(vlim):
"""Check the vlim."""
return not np.isscalar(vlim) and vlim is not None
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
tfr=None, freq=None, x_label=None, y_label=None,
colorbar=False, cmap=('RdBu_r', True), yscale='auto'):
"""Show time-frequency map as two-dimensional image."""
from matplotlib import pyplot as plt, ticker
from matplotlib.widgets import RectangleSelector
if yscale not in ['auto', 'linear', 'log']:
raise ValueError("yscale should be either 'auto', 'linear', or 'log'"
", got {}".format(yscale))
cmap, interactive_cmap = cmap
times = np.linspace(tmin, tmax, num=tfr[ch_idx].shape[1])
# test yscale
if yscale == 'log' and not freq[0] > 0:
raise ValueError('Using log scale for frequency axis requires all your'
' frequencies to be positive (you cannot include'
' the DC component (0 Hz) in the TFR).')
if len(freq) < 2 or freq[0] == 0:
yscale = 'linear'
elif yscale != 'linear':
ratio = freq[1:] / freq[:-1]
if yscale == 'auto':
if freq[0] > 0 and np.allclose(ratio, ratio[0]):
yscale = 'log'
else:
yscale = 'linear'
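    # e.g. freq == [2., 4., 8., 16.] has a constant ratio of 2., so
    # yscale='auto' resolves to 'log'; freq == [2., 4., 6., 8.] does not,
    # so it falls back to 'linear'.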
# compute bounds between time samples
time_diff = np.diff(times) / 2. if len(times) > 1 else [0.0005]
time_lims = np.concatenate([[times[0] - time_diff[0]], times[:-1] +
time_diff, [times[-1] + time_diff[-1]]])
# the same for frequency - depending on whether yscale is log
if yscale == 'linear':
freq_diff = np.diff(freq) / 2. if len(freq) > 1 else [0.5]
freq_lims = np.concatenate([[freq[0] - freq_diff[0]], freq[:-1] +
freq_diff, [freq[-1] + freq_diff[-1]]])
else:
log_freqs = np.concatenate([[freq[0] / ratio[0]], freq,
[freq[-1] * ratio[0]]])
freq_lims = np.sqrt(log_freqs[:-1] * log_freqs[1:])
# construct a time-frequency bounds grid
time_mesh, freq_mesh = np.meshgrid(time_lims, freq_lims)
img = ax.pcolormesh(time_mesh, freq_mesh, tfr[ch_idx], cmap=cmap,
vmin=vmin, vmax=vmax)
# limits, yscale and yticks
ax.set_xlim(time_lims[0], time_lims[-1])
if ylim is None:
ylim = (freq_lims[0], freq_lims[-1])
ax.set_ylim(ylim)
if yscale == 'log':
ax.set_yscale('log')
ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_locator(ticker.NullLocator()) # get rid of minor ticks
tick_vals = freq[np.unique(np.linspace(
0, len(freq) - 1, 12).round().astype('int'))]
ax.set_yticks(tick_vals)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
if isinstance(colorbar, DraggableColorbar):
cbar = colorbar.cbar # this happens with multiaxes case
else:
cbar = plt.colorbar(mappable=img)
if interactive_cmap:
ax.CB = DraggableColorbar(cbar, img)
ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept
def _imshow_tfr_unified(bn, ch_idx, tmin, tmax, vmin, vmax, onselect,
ylim=None, tfr=None, freq=None, vline=None,
x_label=None, y_label=None, colorbar=False,
picker=True, cmap='RdBu_r', title=None, hline=None):
"""Show multiple tfrs on topo using a single axes."""
_compute_scalings(bn, (tmin, tmax), (freq[0], freq[-1]))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax,
bn.y_t + bn.y_s * freq[0], bn.y_t + bn.y_s * freq[-1])
data_lines.append(ax.imshow(tfr[ch_idx], clip_on=True, clip_box=bn.pos,
extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, cmap=cmap))
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False, hline=None):
"""Show time series on topo split across multiple axes."""
import matplotlib.pyplot as plt
picker_flag = False
for data_, color_ in zip(data, color):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
ax.plot(times, data_[ch_idx], color=color_, picker=1e9)
picker_flag = True
else:
ax.plot(times, data_[ch_idx], color=color_)
if vline:
for x in vline:
plt.axvline(x, color='w', linewidth=0.5)
if hline:
for y in hline:
plt.axhline(y, color='w', linewidth=0.5)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
if isinstance(y_label, list):
plt.ylabel(y_label[ch_idx])
else:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _plot_timeseries_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim, data,
color, times, vline=None, x_label=None,
y_label=None, colorbar=False, hline=None):
"""Show multiple time series on topo using a single axes."""
import matplotlib.pyplot as plt
if not (ylim and not any(v is None for v in ylim)):
ylim = np.array([np.min(data), np.max(data)])
# Translation and scale parameters to take data->under_ax normalized coords
_compute_scalings(bn, (tmin, tmax), ylim)
pos = bn.pos
data_lines = bn.data_lines
ax = bn.ax
# XXX These calls could probably be made faster by using collections
for data_, color_ in zip(data, color):
data_lines.append(ax.plot(
bn.x_t + bn.x_s * times, bn.y_t + bn.y_s * data_[ch_idx],
color=color_, clip_on=True, clip_box=pos)[0])
if vline:
vline = np.array(vline) * bn.x_s + bn.x_t
ax.vlines(vline, pos[1], pos[1] + pos[3], color='w', linewidth=0.5)
if hline:
hline = np.array(hline) * bn.y_s + bn.y_t
ax.hlines(hline, pos[0], pos[0] + pos[2], color='w', linewidth=0.5)
if x_label is not None:
ax.text(pos[0] + pos[2] / 2., pos[1], x_label,
horizontalalignment='center', verticalalignment='top')
if y_label is not None:
y_label = y_label[ch_idx] if isinstance(y_label, list) else y_label
ax.text(pos[0], pos[1] + pos[3] / 2., y_label,
                horizontalalignment='right', verticalalignment='middle',
rotation=90)
if colorbar:
plt.colorbar()
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, data=None,
epochs=None, sigma=None, order=None, scalings=None,
vline=None, x_label=None, y_label=None, colorbar=False,
cmap='RdBu_r'):
"""Plot erfimage on sensor topography."""
from scipy import ndimage
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :].copy() * scalings[ch_idx]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
img = ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap, interpolation='nearest')
ax = plt.gca()
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
plt.colorbar(mappable=img)
def _erfimage_imshow_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None, order=None,
scalings=None, vline=None, x_label=None,
y_label=None, colorbar=False, cmap='RdBu_r'):
"""Plot erfimage topography using a single axis."""
from scipy import ndimage
_compute_scalings(bn, (tmin, tmax), (0, len(epochs.events)))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax, bn.y_t,
bn.y_t + bn.y_s * len(epochs.events))
this_data = data[:, ch_idx, :].copy() * scalings[ch_idx]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
data_lines.append(ax.imshow(this_data, extent=extent, aspect='auto',
origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap,
interpolation='nearest'))
def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=(0.,), hline=(0.,), fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
merge_grads=False, legend=True, show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad. If None,
the ylim parameter for each channel is determined by the maximum
absolute peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
hline : list of floats | None
The values at which to show a horizontal line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | string | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
import matplotlib.pyplot as plt
if not type(evoked) in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
colors = ['w'] + COLORS
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warn('More evoked objects than colors available. You should pass '
'a list of unique colors.')
else:
color = cycle([color])
times = evoked[0].times
if not all((e.times == times).all() for e in evoked):
raise ValueError('All evoked.times must be the same')
evoked = [e.copy() for e in evoked]
info = evoked[0].info
ch_names = evoked[0].ch_names
scalings = _handle_default('scalings', scalings)
if not all(e.ch_names == ch_names for e in evoked):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if merge_grads:
picks = _pair_grad_sensors(info, topomap_coords=False)
chs = list()
for pick in picks[::2]:
ch = info['chs'][pick]
ch['ch_name'] = ch['ch_name'][:-1] + 'X'
chs.append(ch)
info['chs'] = chs
info['bads'] = list() # bads dropped on pair_grad_sensors
info._update_redundant()
info._check_consistency()
new_picks = list()
for e in evoked:
data = _merge_grad_data(e.data[picks]) * scalings['grad']
e.data = data
new_picks.append(range(len(data)))
picks = new_picks
types_used = ['grad']
y_label = 'RMS amplitude (%s)' % _handle_default('units')['grad']
if layout is None:
layout = find_layout(info)
if not merge_grads:
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = set(channel_type(info, ch_names.index(ch))
for ch in chs_in_layout)
# remove possible reference meg channels
types_used = set.difference(types_used, set('ref_meg'))
# one check for all vendors
meg_types = set(('mag', 'grad'))
is_meg = len(set.intersection(types_used, meg_types)) > 0
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = dict((t, True) for t in types_used)
picks = [pick_types(info, meg=False, exclude=[],
**types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
for e in evoked:
for pick, ch_type in zip(picks, types_used):
e.data[pick] = e.data[pick] * scalings[ch_type]
if proj is True and all(e.proj is not True for e in evoked):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
# Y labels for picked plots must be reconstructed
y_label = ['Amplitude (%s)' % _handle_default('units')[channel_type(
info, ch_idx)] for ch_idx in range(len(chs_in_layout))]
if ylim is None:
def set_ylim(x):
return np.abs(x).max()
ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
ymax = np.array(ylim_)
ylim_ = (-ymax, ymax)
elif isinstance(ylim, dict):
ylim_ = _handle_default('ylim', ylim)
ylim_ = [ylim_[kk] for kk in types_used]
# extra unpack to avoid bug #1700
if len(ylim_) == 1:
ylim_ = ylim_[0]
else:
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
raise TypeError('ylim must be None or a dict. Got %s.' % type(ylim))
data = [e.data for e in evoked]
show_func = partial(_plot_timeseries_unified, data=data, color=color,
times=times, vline=vline, hline=hline)
click_func = partial(_plot_timeseries, data=data, color=color, times=times,
vline=vline, hline=hline)
fig = _plot_topo(info=info, times=times, show_func=show_func,
click_func=click_func, layout=layout,
colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border,
fig_facecolor=fig_facecolor, font_color=font_color,
axis_facecolor=axis_facecolor, title=title,
x_label='Time (s)', y_label=y_label, unified=True)
add_background_image(fig, fig_background)
if legend is not False:
legend_loc = 0 if legend is True else legend
labels = [e.comment if e.comment else 'Unknown' for e in evoked]
legend = plt.legend(labels, loc=legend_loc,
prop={'size': 10})
legend.get_frame().set_facecolor(axis_facecolor)
txts = legend.get_texts()
for txt, col in zip(txts, color):
txt.set_color(col)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo_proj,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
def _plot_update_evoked_topo_proj(params, bools):
"""Update topo sensor plots."""
evokeds = [e.copy() for e in params['evokeds']]
fig = params['fig']
projs = [proj for proj, b in zip(params['projs'], bools) if b]
params['proj_bools'] = bools
for e in evokeds:
e.add_proj(projs, remove_existing=True)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
for ax in fig.axes[0]._mne_axs:
for line, evoked in zip(ax.data_lines, evokeds):
line.set_ydata(ax.y_t + ax.y_s * evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k',
fig_background=None, font_color='w', show=True):
"""Plot Event Related Potential / Fields image on topographies.
Parameters
----------
epochs : instance of Epochs
The epochs.
layout: instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
layout_scale: float
scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
scalings = _handle_default('scalings', scalings)
data = epochs.get_data()
scale_coeffs = list()
for idx in range(epochs.info['nchan']):
ch_type = channel_type(epochs.info, idx)
scale_coeffs.append(scalings.get(ch_type, 1))
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
if layout is None:
layout = find_layout(epochs.info)
show_func = partial(_erfimage_imshow_unified, scalings=scale_coeffs,
order=order, data=data, epochs=epochs, sigma=sigma,
cmap=cmap)
erf_imshow = partial(_erfimage_imshow, scalings=scale_coeffs, order=order,
data=data, epochs=epochs, sigma=sigma, cmap=cmap)
fig = _plot_topo(info=epochs.info, times=epochs.times,
click_func=erf_imshow, show_func=show_func, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
fig_facecolor=fig_facecolor, font_color=font_color,
border=border, x_label='Time (s)', y_label='Epoch',
unified=True, img=True)
add_background_image(fig, fig_background)
plt_show(show)
return fig
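# Usage sketch (not part of the original module; assumes ``epochs`` is an
# mne.Epochs instance and the vmin/vmax values are hypothetical):
#
#     from mne.viz import plot_topo_image_epochs
#     fig = plot_topo_image_epochs(epochs, sigma=0.5, vmin=-250, vmax=250,
#                                  colorbar=True, title='ERF images')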
| bsd-3-clause |
staircase27/Tower-Defence | TDlib/draw.py | 1 | 6398 | import numpy as np
import matplotlib
matplotlib.use("WXAgg")
import wx
import pylab as pl
import matplotlib.patches as p
import time
def partial(fn, *cargs, **ckwargs):
def call_fn(*fargs, **fkwargs):
d = ckwargs.copy()
d.update(fkwargs)
return fn(*(cargs + fargs), **d)
return call_fn
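# e.g. partial(create, x_min, x_max, y_min, y_max) below yields a zero-argument
# callable that builds a fresh figure with those axis limits each time it is
# called (see MasterUpdate, which invokes it as self.create()).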
def create(x_min,x_max,y_min,y_max):
create.i+=1;
fig=pl.figure(create.i)
ax=fig.add_subplot(111)
ax.set_xlim(x_min,x_max);
ax.set_ylim(y_min,y_max);
return fig,ax;
create.i=-1;
class Update:
colours=["#000000","#0000FF","#000000","#0000FF","#FF0000","#FF66FF","#FF0000","#FF66FF"]
weights=["ultralight","ultralight","heavy","heavy","ultralight","ultralight","heavy","heavy"]
def __init__(self,fig,ax):
self.lables=None;
self.fig=fig;
self.ax=ax;
def __call__(self,a):
fig=self.fig
ax=self.ax;
if self.lables==None:
self.lables={};
for row in a:
x,y,c,n,r=row[0:5]
string="X"
if n<100000:
string=str(n)
rstring="X"
if r<100000:
rstring=str(r)
string=string+" "+rstring;
self.lables[(x,y)]=ax.text(x,y,string,color=self.colours[c],weight=self.weights[c],va="center",ha="center")
fig.canvas.draw_idle()
else:
for row in a:
x,y,c,n,r=row[0:5]
string="X"
if n<100000:
string=str(n)
rstring="X"
if r<100000:
rstring=str(r)
string=string+" "+rstring;
if self.lables[(x,y)].get_text()!=string:
self.lables[(x,y)].set_text(string);
if self.lables[(x,y)].get_color()!=self.colours[c]:
self.lables[(x,y)].set_color(self.colours[c]);
if self.lables[(x,y)].get_weight()!=self.weights[c]:
self.lables[(x,y)].set_weight(self.weights[c]);
fig.canvas.draw_idle()
class MasterUpdate:
def __init__(self,basename,index,create):
self.basename=basename;
self.index=index;
self.create=create;
self.updates={}
self.playing=False;
self.speed=1;
self.next=0;
self.draw()
def __call__(self,evt):
if isinstance(evt,wx.IdleEvent):
if not (self.playing and time.time()>self.next):
evt.RequestMore()
else:
self.next+=self.speed;
self.index+=1;
self.draw();
elif isinstance(evt,wx.KeyEvent):
if evt.GetUniChar()==32:
self.playing=not self.playing;
self.next=time.time();
elif evt.GetUniChar()==189:
self.speed*=1.1
print "Speed ",self.speed
elif evt.GetUniChar()==187:
self.speed/=1.1
print "Speed ",self.speed
elif evt.GetUniChar()==190:
self.index+=1;
self.draw();
elif evt.GetUniChar()==188:
self.index-=1;
self.draw();
else:
print evt.GetUniChar()
evt.Skip();
return;
def draw(self):
f=open(self.basename+"_"+str(self.index)+".tdd");
key=f.readline()[0];
a=np.loadtxt(f,dtype=np.int32)
try:
self.updates[key](a);
except KeyError:
self.updates[key]=Update(*self.create());
self.updates[key](a);
print self.index
def main():
##args are path,start_index,x_max,x_min,y_max,y_min
import sys;
if len(sys.argv)>=2:
path=sys.argv[1]
if len(sys.argv)>=3:
            index=int(sys.argv[2]);
else:
index=0;
if len(sys.argv)>=4:
x_max=float(sys.argv[3]);
else:
x_max=9;
if len(sys.argv)>=5:
x_min=float(sys.argv[4]);
else:
x_min=-1;
if len(sys.argv)>=6:
y_max=float(sys.argv[5]);
else:
y_max=x_max;
if len(sys.argv)>=7:
y_min=float(sys.argv[6]);
else:
y_min=x_min;
else:
import os;
import re;
pattern=re.compile("^(.*)_(\d+).tdd$")
paths=[match.group(1) for match in (pattern.match(path) for path in os.listdir(".")) if not match==None]
paths=dict(zip(paths,paths)).keys()
paths.sort();
paths=dict(zip(range(len(paths)),paths))
print "enter the base path to process or select one of the below"
for i,p in paths.iteritems():
print i,p
path=raw_input(": ");
try:
path=int(path)
path=paths[path]
except ValueError:
pass
except KeyError:
pass
index=raw_input("enter the starting index to plot from or leave blank for '0': ")
try:
index=int(index);
except ValueError:
index=0;
x_max=raw_input("enter the maximum x for the graphs or leave blank for '9': ")
try:
x_max=float(x_max);
except ValueError:
x_max=9;
x_min=raw_input("enter the minimum x for the graphs or leave blank for '-1': ")
try:
x_min=float(x_min);
except ValueError:
x_min=-1;
        y_max=raw_input("enter the maximum y for the graphs or leave blank for '"+str(x_max)+"': ")
try:
y_max=float(y_max);
except ValueError:
y_max=x_max;
        y_min=raw_input("enter the minimum y for the graphs or leave blank for '"+str(x_min)+"': ")
try:
y_min=float(y_min);
except ValueError:
y_min=x_min;
pl.ion()
master=MasterUpdate(path,index,partial(create,x_min,x_max,y_min,y_max));
wx.EVT_IDLE(wx.GetApp(), master)
wx.EVT_KEY_DOWN(wx.GetApp(), master)
pl.show()
if __name__=="__main__":
main(); | gpl-3.0 |
chichilalescu/pyNT | pyNT/test.py | 1 | 3676 | #######################################################################
# #
# Copyright 2014 Cristian C Lalescu #
# #
# This file is part of pyNT. #
# #
# pyNT is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. #
# #
# pyNT is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with pyNT. If not, see <http://www.gnu.org/licenses/> #
# #
#######################################################################
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
from wiener import Wiener
from sde import SDE
from ode import ODE
test_sde = True
test_ode = False
if test_sde:
x = sp.Symbol('x')
bla = SDE(
x = [x],
a = [x],
b = [[x/2, sp.sin(x)/3]])
bla.get_evdt_vs_M(
fig_name = 'figs/extra_tst',
ntraj = 64,
X0 = np.ones(1,).astype(np.float),
h0 = .5,
exp_range = range(8))
bla = SDE(
x = [x],
a = [x],
b = [[.5, .25]])
bla.get_evdt_vs_M(
fig_name = 'figs/add_evdt',
ntraj = 64,
X0 = np.ones(1,).astype(np.float),
h0 = .5,
exp_range = range(8),
solver = ['Taylor_2p0_additive', 'explicit_1p5_additive'])
c = 0.0
v = sp.Symbol('v')
u = x**2 * (x**2 - 1 + c*x)
bla = SDE(
x = [x, v],
a = [v, -u.diff(x) - v],
b = [[.01, .1], [.95, .3]])
bla.get_evdt_vs_M(
fig_name = 'figs/dwell_evdt',
ntraj = 32,
X0 = np.array([0., .0]).astype(np.float),
h0 = .5,
solver = ['Taylor_2p0_additive', 'explicit_1p5_additive'],
exp_range = range(8))
if test_ode:
x = sp.Symbol('x')
y = sp.Symbol('y')
z = sp.Symbol('z')
rho = 28.
sigma = 10.
beta = 8./3
lorenz_rhs = [sigma*(y - x),
x*(rho - z) - y,
x*y - beta*z]
bla = ODE(
x = [x, y, z],
f = lorenz_rhs)
X0 = 10*np.random.random((3, 128))
fig = plt.figure(figsize = (6,6))
ax = fig.add_subplot(111)
for solver in [['Euler', 'Taylor2'],
['Heun', 'Taylor2'],
['cRK', 'Taylor4']]:
evdt = bla.get_evdt(
X0 = X0,
solver = solver)
ax.errorbar(
evdt[:, 0],
evdt[:, 2],
yerr = [evdt[:, 1], evdt[:, 3]],
label = '{0} vs {1}'.format(solver[0], solver[1]))
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(loc = 'best')
fig.savefig('figs/ode_evdt.pdf', format = 'pdf')
| gpl-3.0 |