repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
acmaheri/sms-tools | software/models_interface/hprModel_function.py | 18 | 3728 | # function to call the main analysis/synthesis functions in software/models/hprModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import hprModel as HPR
import stft as STFT
def main(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100,
minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01):
"""
Perform analysis/synthesis using the harmonic plus residual model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# find harmonics and residual
hfreq, hmag, hphase, xr = HPR.hprModelAnal(x, fs, w, N, H, t, minSineDur, nH, minf0, maxf0, f0et, harmDevSlope)
# compute spectrogram of residual
mXr, pXr = STFT.stftAnal(xr, fs, w, N, H)
# synthesize hpr model
y, yh = HPR.hprModelSynth(hfreq, hmag, hphase, xr, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel_sines.wav'
outputFileResidual = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel_residual.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel.wav'
# write sound files for the harmonics, the residual, and their sum
UF.wavwrite(yh, fs, outputFileSines)
UF.wavwrite(xr, fs, outputFileResidual)
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the magnitude spectrogram of residual
plt.subplot(3,1,2)
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
plt.autoscale(tight=True)
# plot harmonic frequencies on residual spectrogram
if (hfreq.shape[1] > 0):
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (s)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + residual spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
| agpl-3.0 |
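A minimal usage sketch for the wrapper above, assuming it is run from software/models_interface/ inside the sms-tools repository (so that the relative default paths resolve) and that an output_sounds/ directory exists next to it; the parameter values are illustrative, not recommendations:

from hprModel_function import main

# Larger window and FFT for finer frequency resolution; the input file is the
# default sound shipped with the repository.
main(inputFile='../../sounds/sax-phrase-short.wav', window='blackmanharris',
     M=1201, N=2048, t=-90, minSineDur=0.1, nH=60, minf0=300, maxf0=800,
     f0et=7, harmDevSlope=0.01)
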
hdmetor/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 44 | 17033 | import tempfile
import shutil
import os.path as op
import warnings
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# The principle of LARS is to keep the covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso handles rank-deficient
# input data (rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test: before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note: it used to be the case that Lars had to use the 'drop for good'
# strategy for this, but that is no longer the case thanks to the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# ensure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Ensure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general; it is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
temp_folder = tempfile.mkdtemp()
try:
fpath = op.join(temp_folder, 'data.pkl')
joblib.dump(splitted_data, fpath)
X_train, X_test, y_train, y_test = joblib.load(fpath, mmap_mode='r')
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
finally:
# try to release the mmap file handle in time to be able to delete
# the temporary folder under windows
del X_train, X_test, y_train, y_test
try:
shutil.rmtree(temp_folder)
except shutil.WindowsError:
warnings.warn("Could not delete temporary folder %s" % temp_folder)
| bsd-3-clause |
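A minimal sketch of the public API these tests exercise, kept outside the test suite; it recomputes the LAR path on the diabetes data and informally checks two of the properties asserted above (decreasing alphas and a least-squares endpoint):

import numpy as np
from sklearn import datasets, linear_model

diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# Full LAR path: the alphas shrink monotonically along the path ...
alphas, active, coef_path = linear_model.lars_path(X, y, method='lar')
print(np.all(np.diff(alphas) < 0))

# ... and the final coefficient vector matches the least-squares solution.
coef_lstsq = np.linalg.lstsq(X, y)[0]
print(np.allclose(coef_path[:, -1], coef_lstsq, atol=1e-6))
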
pratapvardhan/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Because each dimension contains only a few points and linear
regression fits a straight line to them as closely as it can,
noise in the observations causes large variance, as shown in
the first plot: the slope of every fitted line can vary
considerably from one draw of the noise to the next.
Ridge regression minimizes a penalised version of the
least-squares objective. The penalty `shrinks` the values of
the regression coefficients.
Despite the few data points in each dimension, the slope of
the ridge prediction is much more stable, and the variance of
the fitted line is greatly reduced compared to that of
standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
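A rough numerical companion to the example above, estimating the spread of the fitted slope under repeated noise for OLS versus ridge; the noise scale and repetition count are arbitrary choices made only for illustration:

import numpy as np
from sklearn import linear_model

X_train = np.c_[.5, 1].T
y_train = [.5, 1]
rng = np.random.RandomState(0)

for name, clf in [('ols', linear_model.LinearRegression()),
                  ('ridge', linear_model.Ridge(alpha=.1))]:
    slopes = []
    for _ in range(200):
        # same perturbation scheme as the plotting loop above
        this_X = .1 * rng.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)
        slopes.append(clf.coef_[0])
    print(name, 'slope std: %.3f' % np.std(slopes))
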
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/ensemble/forest.py | 6 | 62682 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
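# Rows never drawn by a given tree's bootstrap are that tree's
# out-of-bag (OOB) rows; _set_oob_score evaluates the tree on exactly
# these rows.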
return unsampled_indices
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
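# Multiplying the weights by the draw counts is equivalent to fitting on
# the bootstrap sample itself (a row drawn k times gets weight k, an
# out-of-bag row gets weight 0) while avoiding a copy of X.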
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort the indices so that each individual tree of the
# ensemble does not have to sort them itself.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, unlike
# [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated in 0.17 and"
"will be removed in 0.19. It was replaced by "
"class_weight='balanced_subsample' using the balanced"
"strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of decision
trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
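Examples
--------
A minimal usage sketch (synthetic data; the parameter values below are
illustrative assumptions, not tuned recommendations)::
    from sklearn.datasets import make_classification
    from sklearn.ensemble import ExtraTreesClassifier
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    clf = ExtraTreesClassifier(n_estimators=50, random_state=0)
    clf.fit(X, y)
    proba = clf.predict_proba(X[:5])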
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
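Examples
--------
A minimal usage sketch (synthetic data; the parameter values below are
illustrative assumptions, not tuned recommendations)::
    from sklearn.datasets import make_regression
    from sklearn.ensemble import ExtraTreesRegressor
    X, y = make_regression(n_samples=200, n_features=10, random_state=0)
    reg = ExtraTreesRegressor(n_estimators=50, random_state=0)
    reg.fit(X, y)
    y_pred = reg.predict(X[:5])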
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
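Examples
--------
A minimal usage sketch (synthetic data; the parameter values below are
illustrative assumptions)::
    from sklearn.datasets import make_blobs
    from sklearn.ensemble import RandomTreesEmbedding
    X, _ = make_blobs(n_samples=100, n_features=4, random_state=0)
    embedder = RandomTreesEmbedding(n_estimators=10, max_depth=3, random_state=0)
    X_sparse = embedder.fit_transform(X)  # sparse one-hot coding of leaf indices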
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are unit tests checking that we fail
# for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
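# The regression targets below are random on purpose: with max_features=1 the
# totally random trees never use y to choose their splits, so a random target
# is generated only to satisfy the fit API of the base estimator.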
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| mit |
qrqiuren/sms-tools | lectures/04-STFT/plots-code/time-freq-compromise.py | 19 | 1255 | import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft
import math
(fs, x) = UF.wavread('../../../sounds/piano.wav')
plt.figure(1, figsize=(9.5, 6))
w = np.hamming(256)
N = 256
H = 128
mX1, pX1 = STFT.stftAnal(x, fs, w, N, H)
plt.subplot(211)
numFrames = int(mX1[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX1[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX1))
plt.title('mX (piano.wav), M=256, N=256, H=128')
plt.autoscale(tight=True)
w = np.hamming(1024)
N = 1024
H = 128
mX2, pX2 = STFT.stftAnal(x, fs, w, N, H)
plt.subplot(212)
numFrames = int(mX2[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX2[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX2))
plt.title('mX (piano.wav), M=1024, N=1024, H=128')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('time-freq-compromise.png')
plt.show()
| agpl-3.0 |
schets/scikit-learn | sklearn/ensemble/weight_boosting.py | 26 | 40570 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import (
has_fit_parameter,
check_is_fitted)
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba <= 0] = 1e-5
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
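Examples
--------
A minimal usage sketch (synthetic data; the parameter values below are
illustrative assumptions, not tuned recommendations)::
    from sklearn.datasets import make_classification
    from sklearn.ensemble import AdaBoostClassifier
    X, y = make_classification(n_samples=200, random_state=0)
    clf = AdaBoostClassifier(n_estimators=50, learning_rate=1.0, random_state=0)
    clf.fit(X, y)
    staged_scores = list(clf.staged_score(X, y))  # accuracy after each boost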
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
y_predict_proba[y_predict_proba <= 0] = 1e-5
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
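Examples
--------
A minimal usage sketch (synthetic data; the parameter values below are
illustrative assumptions, not tuned recommendations)::
    from sklearn.datasets import make_regression
    from sklearn.ensemble import AdaBoostRegressor
    X, y = make_regression(n_samples=200, n_features=10, random_state=0)
    reg = AdaBoostRegressor(n_estimators=50, loss='linear', random_state=0)
    reg.fit(X, y)
    y_pred = reg.predict(X[:5])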
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
dimmddr/roadSignsNN | utils.py | 1 | 3844 | import matplotlib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import settings
def analyse_sign_frame_size_fluctuations(annotation_path, output_file):
with open(output_file, 'w') as outp:
raw_data = pd.read_csv(annotation_path, delimiter=';')
outp.write("Analyse frame size fluctuations.\n")
data = pd.DataFrame()
data['width'] = raw_data['Lower right corner X'] - raw_data['Upper left corner X']
data['height'] = raw_data['Lower right corner Y'] - raw_data['Upper left corner Y']
outp.write("Minimum width = {}, minimum height = {}\n".format(data['width'].min(), data['height'].min()))
outp.write("Maximum width = {}, maximum height = {}\n".format(data['width'].max(), data['height'].max()))
matplotlib.rcdefaults()
matplotlib.rcParams['font.family'] = 'fantasy'
matplotlib.rcParams['font.fantasy'] = 'Times New Roman', 'Ubuntu', 'Arial', 'Tahoma', 'Calibri'
matplotlib.rcParams.update({'font.size': 18})
hist, bins = np.histogram(data['width'], bins=range(data['width'].min(), data['width'].max(), 5))
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.title("Ширина дорожных знаков")
plt.xlabel("Ширина")
plt.ylabel("Сколько раз встречалась")
plt.xticks(bins, bins)
plt.show()
hist, bins = np.histogram(data['height'], bins=range(data['height'].min(), data['height'].max(), 5))
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.title("Высота дорожных знаков")
plt.xlabel("Высота")
plt.ylabel("Сколько раз встречалась")
plt.xticks(bins, bins)
plt.show()
# Annotation tag;
# Upper left corner X;Upper left corner Y;Lower right corner X;Lower right corner Y;Occluded,On another road
def nms(boxes):
if 0 == len(boxes):
return []
boxes = np.array(boxes)
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
xmin = boxes[:, 0]
ymin = boxes[:, 1]
xmax = boxes[:, 2]
ymax = boxes[:, 3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (xmax - xmin + 1) * (ymax - ymin + 1)
idxs = np.argsort(ymax)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(xmin[i], xmin[idxs[:last]])
yy1 = np.maximum(ymin[i], ymin[idxs[:last]])
xx2 = np.minimum(xmax[i], xmax[idxs[:last]])
yy2 = np.minimum(ymax[i], ymax[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have an overlap above the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > settings.COVER_PERCENT)[0])))
# return only the bounding boxes that were picked using the
# integer data type
return boxes[pick], pick
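# Example (illustrative; assumes settings.COVER_PERCENT is set to an overlap
# threshold such as 0.5). Two heavily overlapping candidate boxes collapse to
# a single detection while the distant box is kept:
#
#     candidates = [(48, 50, 110, 112), (50, 52, 112, 114), (200, 200, 250, 250)]
#     kept_boxes, kept_idx = nms(candidates)
#
# Note that the overlap used here is the intersection area divided by the area
# of each remaining box, not a symmetric IoU.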
| mit |
yonglehou/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
KevinNJ/Projects | Short Time Fourier Transform/archive/half_cosine.py | 1 | 1241 | __author__ = 'knelson'
import numpy as np
import matplotlib.pyplot as plt
def half_cosine(window, total_segments, hop_size):
win_length = window.shape[0]
total_length = win_length + hop_size*(total_segments-1)
result = np.zeros(total_length)
for i in xrange(total_segments):
current_loc = i*hop_size
result[current_loc:current_loc+win_length] += window
plt.figure()
plt.plot(result)
plt.ylim([0, 1.2])
plt.show()
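# Example (illustrative): ten half-amplitude Hann windows of length 500,
# hopped by 250 samples (50% overlap), should sum to a roughly flat plateau
# away from the edges:
#
#     half_cosine(np.hanning(500) * 0.5, total_segments=10, hop_size=250)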
if __name__ == '__main__':
win_length = 500
overlap = 1 - 0.5
segments = 10
total_length = np.int32(win_length + (win_length * overlap)*(segments-1))
hop_length = np.int32(win_length * overlap)
print 'win length:', win_length
print 'overlap_fac:', overlap
print 'segments:', segments
print 'total_length:', total_length
print 'hop_length:', hop_length
print ''
win = np.hanning(win_length) * overlap * 2
result = np.zeros(total_length)
for i in xrange(segments):
current_loc = i*hop_length
print 'current location:', current_loc
result[current_loc:current_loc+win_length] += win
print ' '.join(map(lambda x: '%.4f'%x, result))
plt.plot(result)
plt.ylim([0, 1.2])
plt.show()
| mit |
wxchan/LightGBM | python-package/lightgbm/compat.py | 2 | 3015 | # coding: utf-8
# pylint: disable = C0103
"""Compatibility"""
from __future__ import absolute_import
import inspect
import sys
import numpy as np
is_py3 = (sys.version_info[0] == 3)
"""compatibility between python2 and python3"""
if is_py3:
string_type = str
numeric_types = (int, float, bool)
integer_types = (int, )
range_ = range
def argc_(func):
"""return number of arguments of a function"""
return len(inspect.signature(func).parameters)
else:
string_type = basestring
numeric_types = (int, long, float, bool)
integer_types = (int, long)
range_ = xrange
def argc_(func):
"""return number of arguments of a function"""
return len(inspect.getargspec(func).args)
"""json"""
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
def json_default_with_numpy(obj):
if isinstance(obj, (np.integer, np.floating, np.bool_)):
return obj.item()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
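# Example (illustrative): pass this hook as ``default`` so the encoder can
# handle NumPy scalars and arrays it would otherwise reject, e.g.
# json.dumps({'num_leaves': np.int32(31)}, default=json_default_with_numpy).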
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
pass
class DataFrame(object):
pass
"""sklearn"""
try:
from sklearn.base import BaseEstimator
from sklearn.base import RegressorMixin, ClassifierMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import deprecated
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_X_y, check_array, check_consistent_length
try:
from sklearn.model_selection import StratifiedKFold, GroupKFold
from sklearn.exceptions import NotFittedError
except ImportError:
from sklearn.cross_validation import StratifiedKFold, GroupKFold
from sklearn.utils.validation import NotFittedError
SKLEARN_INSTALLED = True
_LGBMModelBase = BaseEstimator
_LGBMRegressorBase = RegressorMixin
_LGBMClassifierBase = ClassifierMixin
_LGBMLabelEncoder = LabelEncoder
LGBMDeprecated = deprecated
LGBMNotFittedError = NotFittedError
_LGBMStratifiedKFold = StratifiedKFold
_LGBMGroupKFold = GroupKFold
_LGBMCheckXY = check_X_y
_LGBMCheckArray = check_array
_LGBMCheckConsistentLength = check_consistent_length
_LGBMCheckClassificationTargets = check_classification_targets
except ImportError:
SKLEARN_INSTALLED = False
_LGBMModelBase = object
_LGBMClassifierBase = object
_LGBMRegressorBase = object
_LGBMLabelEncoder = None
    # LGBMDeprecated = None  # don't uncomment: it raises an error when scikit-learn is not installed
LGBMNotFittedError = ValueError
_LGBMStratifiedKFold = None
_LGBMGroupKFold = None
_LGBMCheckXY = None
_LGBMCheckArray = None
_LGBMCheckConsistentLength = None
_LGBMCheckClassificationTargets = None
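# Callers are expected to check SKLEARN_INSTALLED before using the _LGBM* aliases,
# so this module can always be imported even when scikit-learn is absent.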
| mit |
talonchandler/dipsim | notes/2017-09-28-anisotropy/figures/anisotropy.py | 1 | 1465 | from dipsim import multiframe, util, detector, illuminator, microscope
import numpy as np
import matplotlib.pyplot as plt
import os; import time; start = time.time(); print('Running...')
# Compute anisotropy
def anisotropy(phi):
print('Computing microscope: ')
# y-polarized input from x optical axis
ill = illuminator.Illuminator(illum_type='sheet', theta_optical_axis=np.pi/2, phi_pol=np.pi/2)
# perp: x-polarized detection
# par: y-polarized detection
det_perp = detector.Detector(det_type='polarized', na=0.001, phi_pol=np.pi/2)
det_par = detector.Detector(det_type='polarized', na=0.001, phi_pol=0)
m_perp = microscope.Microscope(illuminator=ill, detector=det_perp, max_photons=1e10)
m_par = microscope.Microscope(illuminator=ill, detector=det_par, max_photons=1e10)
I_perp = m_perp.calc_intensity((np.pi/2, phi), kappa=0.0, epsrel=1e-8)
I_par = m_par.calc_intensity((np.pi/2, phi), kappa=0.0, epsrel=1e-8)
    denom = I_par + 2*I_perp
    if denom == 0:
        print("*")
        return 0.0
    r = (I_par - I_perp)/denom
    print(I_perp, I_par, r)
    return r
f, ax = plt.subplots(1, 1, figsize=(5, 5))
x = list(np.linspace(-np.pi, np.pi, 100))
v_aniso = np.vectorize(anisotropy)
r_vect = v_aniso(x)
ax.plot(x, r_vect, '-k')
ax.set_xlabel('$\phi$')
ax.set_ylabel('$r$')
f.savefig('anisotropy.pdf', dpi=300)
print('Total time: '+str(np.round(time.time() - start, 2)))
os.system('say "done"')
| mit |
Tobychev/tardis | tardis/plasma/properties/nlte.py | 3 | 9723 | import logging
import os
import numpy as np
import pandas as pd
from tardis.plasma.properties.base import (PreviousIterationProperty,
ProcessingPlasmaProperty)
from tardis.plasma.properties import PhiSahaNebular, PhiSahaLTE
__all__ = ['PreviousElectronDensities', 'PreviousBetaSobolev',
'HeliumNLTE', 'HeliumNumericalNLTE']
logger = logging.getLogger(__name__)
class PreviousElectronDensities(PreviousIterationProperty):
"""
Attributes
----------
previous_electron_densities : The values for the electron densities converged upon in the previous iteration.
"""
outputs = ('previous_electron_densities',)
def set_initial_value(self, kwargs):
initial_value = np.ones(len(kwargs['abundance'].columns))*1000000.0
self._set_initial_value(initial_value)
class PreviousBetaSobolev(PreviousIterationProperty):
"""
Attributes
----------
previous_beta_sobolev : The beta sobolev values converged upon in the previous iteration.
"""
outputs = ('previous_beta_sobolev',)
def set_initial_value(self, kwargs):
try:
lines = len(kwargs['atomic_data'].lines)
except:
lines = len(kwargs['atomic_data']._lines)
initial_value = np.ones((lines,
len(kwargs['abundance'].columns)))
self._set_initial_value(initial_value)
class HeliumNLTE(ProcessingPlasmaProperty):
outputs = ('helium_population',)
def calculate(self, level_boltzmann_factor, electron_densities,
ionization_data, beta_rad, g, g_electron, w, t_rad, t_electrons,
delta, zeta_data, number_density, partition_function):
"""
Updates all of the helium level populations according to the helium NLTE recomb approximation.
"""
helium_population = level_boltzmann_factor.ix[2].copy()
# He I excited states
he_one_population = self.calculate_helium_one(g_electron, beta_rad,
ionization_data, level_boltzmann_factor, electron_densities, g, w)
helium_population.ix[0].update(he_one_population)
#He I metastable states
helium_population.ix[0,1] *= (1 / w)
helium_population.ix[0,2] *= (1 / w)
#He I ground state
helium_population.ix[0,0] = 0.0
#He II excited states
he_two_population = level_boltzmann_factor.ix[2,1].mul(
(g.ix[2,1].ix[0]**(-1)))
helium_population.ix[1].update(he_two_population)
#He II ground state
helium_population.ix[1,0] = 1.0
#He III states
helium_population.ix[2,0] = self.calculate_helium_three(t_rad, w,
zeta_data, t_electrons, delta, g_electron, beta_rad,
ionization_data, electron_densities, g)
unnormalised = helium_population.sum()
normalised = helium_population.mul(number_density.ix[2] / unnormalised)
helium_population.update(normalised)
return helium_population
@staticmethod
def calculate_helium_one(g_electron, beta_rad, ionization_data,
level_boltzmann_factor, electron_densities, g, w):
"""
Calculates the He I level population values, in equilibrium with the He II ground state.
"""
return level_boltzmann_factor.ix[2,0].mul(
g.ix[2,0], axis=0) * (1./(2*g.ix[2,1,0])) * \
(1/g_electron) * (1/(w**2)) * np.exp(
ionization_data.ionization_energy.ix[2,1] * beta_rad) * \
electron_densities
@staticmethod
def calculate_helium_three(t_rad, w, zeta_data, t_electrons, delta,
g_electron, beta_rad, ionization_data, electron_densities, g):
"""
Calculates the He III level population values.
"""
zeta = PhiSahaNebular.get_zeta_values(zeta_data, 2, t_rad)[1]
he_three_population = (2 / electron_densities) * \
(float(g.ix[2,2,0])/g.ix[2,1,0]) * g_electron * \
np.exp(-ionization_data.ionization_energy.ix[2,2] * beta_rad) \
* w * (delta.ix[2,2] * zeta + w * (1. - zeta)) * \
            (t_electrons / t_rad) ** 0.5
        return he_three_population
class HeliumNumericalNLTE(ProcessingPlasmaProperty):
'''
IMPORTANT: This particular property requires a specific numerical NLTE
solver and a specific atomic dataset (neither of which are distributed
with Tardis) to work.
'''
outputs = ('helium_population',)
def calculate(self, ion_number_density, electron_densities, t_electrons, w,
lines, j_blues, levels, level_boltzmann_factor, t_rad,
zeta_data, g_electron, delta, partition_function, ionization_data,
beta_rad, g):
logger.info('Performing numerical NLTE He calculations.')
if len(j_blues)==0:
return None
heating_rate_data = np.loadtxt(
self.plasma_parent.heating_rate_data_file, unpack=True)
#Outputting data required by SH module
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/shellconditions_{}.txt'.format(zone),
'w') as output_file:
output_file.write(ion_number_density.ix[2].sum()[zone])
output_file.write(electron_densities[zone])
output_file.write(t_electrons[zone])
output_file.write(heating_rate_data[zone])
output_file.write(w[zone])
output_file.write(self.plasma_parent.time_explosion)
output_file.write(t_rad[zone])
output_file.write(self.plasma_parent.v_inner[zone])
output_file.write(self.plasma_parent.v_outer[zone])
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/abundances_{}.txt'.format(zone), 'w') as \
output_file:
for element in range(1,31):
try:
number_density = ion_number_density[zone].ix[
element].sum()
except:
number_density = 0.0
output_file.write(number_density)
helium_lines = lines[lines['atomic_number']==2]
helium_lines = helium_lines[helium_lines['ion_number']==0]
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/discradfield_{}.txt'.format(zone), 'w') \
as output_file:
j_blues = pd.DataFrame(j_blues, index=lines.index)
helium_j_blues = j_blues[zone].ix[helium_lines.index]
for value in helium_lines.index:
if (helium_lines.level_number_lower.ix[value]<35):
output_file.write(
int(helium_lines.level_number_lower.ix[value]+1),
int(helium_lines.level_number_upper.ix[value]+1),
j_blues[zone].ix[value])
#Running numerical simulations
for zone, _ in enumerate(electron_densities):
os.rename('He_NLTE_Files/abundances{}.txt'.format(zone),
'He_NLTE_Files/abundances_current.txt')
os.rename('He_NLTE_Files/shellconditions{}.txt'.format(zone),
'He_NLTE_Files/shellconditions_current.txt')
os.rename('He_NLTE_Files/discradfield{}.txt'.format(zone),
'He_NLTE_Files/discradfield_current.txt')
os.system("nlte-solver-module/bin/nlte_solvertest >/dev/null")
os.rename('He_NLTE_Files/abundances_current.txt',
'He_NLTE_Files/abundances{}.txt'.format(zone))
os.rename('He_NLTE_Files/shellconditions_current.txt',
'He_NLTE_Files/shellconditions{}.txt'.format(zone))
os.rename('He_NLTE_Files/discradfield_current.txt',
'He_NLTE_Files/discradfield{}.txt'.format(zone))
os.rename('debug_occs.dat', 'He_NLTE_Files/occs{}.txt'.format(zone))
#Reading in populations from files
helium_population = level_boltzmann_factor.ix[2].copy()
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/discradfield{}.txt'.format(zone), 'r') as \
read_file:
for level in range(0, 35):
level_population = read_file.readline()
level_population = float(level_population)
helium_population[zone].ix[0][level] = level_population
helium_population[zone].ix[1].ix[0] = float(
read_file.readline())
#Performing He LTE level populations (upper two energy levels,
#He II excited states, He III)
he_one_population = HeliumNLTE.calculate_helium_one(g_electron,
beta_rad, partition_function, ionization_data,
level_boltzmann_factor, electron_densities, g, w, t_rad,
t_electrons)
helium_population.ix[0].ix[35].update(he_one_population.ix[35])
helium_population.ix[0].ix[36].update(he_one_population.ix[36])
he_two_population = level_boltzmann_factor.ix[2].ix[1].ix[1:].mul(
            (g.ix[2,1,0]**(-1)) * helium_population.ix[1,0])  # scale by the He II ground-state population read above
helium_population.ix[1].ix[1:].update(he_two_population)
helium_population.ix[2].ix[0] = HeliumNLTE.calculate_helium_three(
t_rad, w, zeta_data, t_electrons, delta, g_electron, beta_rad,
partition_function, ionization_data, electron_densities)
unnormalised = helium_population.sum()
normalised = helium_population.mul(ion_number_density.ix[2].sum()
/ unnormalised)
helium_population.update(normalised)
return helium_population
| bsd-3-clause |
ECP-CANDLE/Benchmarks | Pilot1/P1B3/p1b3.py | 1 | 36100 | from __future__ import absolute_import
import collections
import gzip
import logging
import os
import sys
import multiprocessing
import threading
import argparse
try:
import configparser
except ImportError:
import ConfigParser as configparser
import numpy as np
import pandas as pd
from itertools import cycle, islice
try:
from sklearn.impute import SimpleImputer as Imputer
except ImportError:
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..' ))
sys.path.append(lib_path)
lib_path2 = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path2)
import candle
logger = logging.getLogger(__name__)
# Number of data generator workers
WORKERS = 1
#np.set_printoptions(threshold=np.nan)
class BenchmarkP1B3(candle.Benchmark):
def set_locals(self):
"""Functionality to set variables specific for the benchmark
- required: set of required parameters for the benchmark.
- additional_definitions: list of dictionaries describing the additional parameters for the
benchmark.
"""
if required is not None:
self.required = set(required)
if additional_definitions is not None:
self.additional_definitions = additional_definitions
additional_definitions = [
# Feature selection
{'name':'cell_features',
'nargs':'+',
#'default':'argparse.SUPPRESS',
'choices':['expression', 'mirna', 'proteome', 'all', 'categorical'],
'help':'use one or more cell line feature sets: "expression", "mirna", "proteome", "all"; or use "categorical" for one-hot encoding of cell lines'},
{'name':'drug_features',
'nargs':'+',
#'default':'argparse.SUPPRESS',
'choices':['descriptors', 'latent', 'all', 'noise'],
'help':"use dragon7 descriptors, latent representations from Aspuru-Guzik's SMILES autoencoder, or both, or random features; 'descriptors','latent', 'all', 'noise'"},
{'name':'cell_noise_sigma', 'type':float,
'help':"standard deviation of guassian noise to add to cell line features during training"},
# Output selection
{'name':'min_logconc',
'type':float,
#'default':'argparse.SUPPRESS',
'help':"min log concentration of dose response data to use: -3.0 to -7.0"},
{'name':'max_logconc',
'type':float,
#'default':'argparse.SUPPRESS',
'help':"max log concentration of dose response data to use: -3.0 to -7.0"},
{'name':'subsample',
#'default':'argparse.SUPPRESS',
'choices':['naive_balancing', 'none'],
'help':"dose response subsample strategy; 'none' or 'naive_balancing'"},
{'name':'category_cutoffs',
'nargs':'+',
'type':float,
#'default':'argparse.SUPPRESS',
'help':"list of growth cutoffs (between -1 and +1) seperating non-response and response categories"},
# Sample data selection
{'name':'test_cell_split',
'type':float,
#'default':'argparse.SUPPRESS',
'help':"cell lines to use in test; if None use predefined unseen cell lines instead of sampling cell lines used in training"},
# Test random model
{'name':'scramble',
'type': candle.str2bool,
'default': False,
'help':'randomly shuffle dose response data'},
{'name':'workers',
'type':int,
'default':WORKERS,
'help':'number of data generator workers'}
]
required = [
'activation',
'batch_size',
'batch_normalization',
'category_cutoffs',
'cell_features',
'dropout',
'drug_features',
'epochs',
'feature_subsample',
'initialization',
'learning_rate',
'loss',
'min_logconc',
'max_logconc',
'optimizer',
# 'penalty',
'rng_seed',
'scaling',
'subsample',
'test_cell_split',
'val_split',
'cell_noise_sigma'
]
#def common_parser(parser):
#
# parser.add_argument("--config_file", dest='config_file', type=str,
# default=os.path.join(file_path, 'p1b3_default_model.txt'),
# help="specify model configuration file")
#
# # Parse has been split between arguments that are common with the default neon parser
# # and all the other options
# parser = candle.get_default_neon_parse(parser)
# parser = p1_common.get_p1_common_parser(parser)
#
# # Arguments that are applicable just to p1b3
# parser = p1b3_parser(parser)
#
# return parser
#def p1b3_parser(parser):
#
# # Feature selection
# parser.add_argument("--cell_features", nargs='+',
# default=argparse.SUPPRESS,
# choices=['expression', 'mirna', 'proteome', 'all', 'categorical'],
# help="use one or more cell line feature sets: 'expression', 'mirna', 'proteome', 'all'; or use 'categorical' for one-hot encoding of cell lines")
# parser.add_argument("--drug_features", nargs='+',
# default=argparse.SUPPRESS,
# choices=['descriptors', 'latent', 'all', 'noise'],
# help="use dragon7 descriptors, latent representations from Aspuru-Guzik's SMILES autoencoder, or both, or random features; 'descriptors','latent', 'all', 'noise'")
# parser.add_argument("--cell_noise_sigma", type=float,
# help="standard deviation of guassian noise to add to cell line features during training")
# # Output selection
# parser.add_argument("--min_logconc", type=float,
# default=argparse.SUPPRESS,
# help="min log concentration of dose response data to use: -3.0 to -7.0")
# parser.add_argument("--max_logconc", type=float,
# default=argparse.SUPPRESS,
# help="max log concentration of dose response data to use: -3.0 to -7.0")
# parser.add_argument("--subsample",
# default=argparse.SUPPRESS,
# choices=['naive_balancing', 'none'],
# help="dose response subsample strategy; 'none' or 'naive_balancing'")
# parser.add_argument("--category_cutoffs", nargs='+', type=float,
# default=argparse.SUPPRESS,
# help="list of growth cutoffs (between -1 and +1) seperating non-response and response categories")
# # Sample data selection
# parser.add_argument("--test_cell_split", type=float,
# default=argparse.SUPPRESS,
# help="cell lines to use in test; if None use predefined unseen cell lines instead of sampling cell lines used in training")
# # Test random model
# parser.add_argument("--scramble", action="store_true",
# default=False,
# help="randomly shuffle dose response data")
# parser.add_argument("--workers", type=int,
# default=WORKERS,
# help="number of data generator workers")
#
# return parser
#def read_config_file(file):
# config = configparser.ConfigParser()
# config.read(file)
# section = config.sections()
# fileParams = {}
#
# # default config values that we assume exists
# fileParams['activation']=eval(config.get(section[0],'activation'))
# fileParams['batch_size']=eval(config.get(section[0],'batch_size'))
# fileParams['batch_normalization']=eval(config.get(section[0],'batch_normalization'))
# fileParams['category_cutoffs']=eval(config.get(section[0],'category_cutoffs'))
# fileParams['cell_features']=eval(config.get(section[0],'cell_features'))
# fileParams['dropout']=eval(config.get(section[0],'dropout'))
# fileParams['drug_features']=eval(config.get(section[0],'drug_features'))
# fileParams['epochs']=eval(config.get(section[0],'epochs'))
# fileParams['feature_subsample']=eval(config.get(section[0],'feature_subsample'))
# fileParams['initialization']=eval(config.get(section[0],'initialization'))
# fileParams['learning_rate']=eval(config.get(section[0], 'learning_rate'))
# fileParams['loss']=eval(config.get(section[0],'loss'))
# fileParams['min_logconc']=eval(config.get(section[0],'min_logconc'))
# fileParams['max_logconc']=eval(config.get(section[0],'max_logconc'))
# fileParams['optimizer']=eval(config.get(section[0],'optimizer'))
## fileParams['penalty']=eval(config.get(section[0],'penalty'))
# fileParams['rng_seed']=eval(config.get(section[0],'rng_seed'))
# fileParams['scaling']=eval(config.get(section[0],'scaling'))
# fileParams['subsample']=eval(config.get(section[0],'subsample'))
# fileParams['test_cell_split']=eval(config.get(section[0],'test_cell_split'))
# fileParams['val_split']=eval(config.get(section[0],'val_split'))
# fileParams['cell_noise_sigma']=eval(config.get(section[0],'cell_noise_sigma'))
#
# # parse the remaining values
# for k,v in config.items(section[0]):
# if not k in fileParams:
# fileParams[k] = eval(v)
#
def check_params(fileParams):
# Allow for either dense or convolutional layer specification
# if none found exit
try:
fileParams['dense']
except KeyError:
try:
fileParams['conv']
except KeyError:
print("Error! No dense or conv layers specified. Wrong file !! ... exiting ")
raise
else:
try:
fileParams['pool']
except KeyError:
fileParams['pool'] = None
print("Warning ! No pooling specified after conv layer.")
def extension_from_parameters(params, framework):
"""Construct string for saving model with annotation of parameters"""
ext = framework
ext += '.A={}'.format(params['activation'])
ext += '.B={}'.format(params['batch_size'])
ext += '.D={}'.format(params['dropout'])
ext += '.E={}'.format(params['epochs'])
if params['feature_subsample']:
ext += '.F={}'.format(params['feature_subsample'])
if 'cell_noise_sigma' in params:
ext += '.N={}'.format(params['cell_noise_sigma'])
if 'conv' in params:
name = 'LC' if 'locally_connected' in params else 'C'
layer_list = list(range(0, len(params['conv'])))
for l, i in enumerate(layer_list):
filters = params['conv'][i][0]
filter_len = params['conv'][i][1]
stride = params['conv'][i][2]
if filters <= 0 or filter_len <= 0 or stride <= 0:
break
ext += '.{}{}={},{},{}'.format(name, l+1, filters, filter_len, stride)
if 'pool' in params and params['conv'][0] and params['conv'][1]:
ext += '.P={}'.format(params['pool'])
if 'dense' in params:
for i, n in enumerate(params['dense']):
if n:
ext += '.D{}={}'.format(i+1, n)
if params['batch_normalization']:
ext += '.BN'
ext += '.S={}'.format(params['scaling'])
return ext
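# Example: a dense Keras configuration yields a tag along the lines of
# 'keras.A=relu.B=100.D=0.1.E=20.D1=1000.D2=500.S=std', which is then used to
# annotate saved-model filenames.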
def scale(df, scaling=None):
"""Scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to scale
scaling : 'maxabs', 'minmax', 'std', or None, optional (default 'std')
type of scaling to apply
"""
if scaling is None or scaling.lower() == 'none':
return df
df = df.dropna(axis=1, how='any')
# Scaling data
if scaling == 'maxabs':
# Normalizing -1 to 1
scaler = MaxAbsScaler()
elif scaling == 'minmax':
# Scaling to [0,1]
scaler = MinMaxScaler()
else:
# Standard normalization
scaler = StandardScaler()
mat = df.as_matrix()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
def impute_and_scale(df, scaling='std'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = df.dropna(axis=1, how='all')
#imputer = Imputer(strategy='mean', axis=0)
imputer = Imputer(strategy='mean')
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
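# e.g. impute_and_scale(df_raw, scaling='maxabs') drops all-NaN columns, fills the
# remaining NaNs with column means, and rescales every column to [-1, 1].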
def load_cellline_expressions(path, dtype, ncols=None, scaling='std'):
"""Load cell line expression data, sub-select columns of gene expression
    randomly if specified, scale the selected data and return a
pandas dataframe.
Parameters
----------
path: string
path to 'RNA_5_Platform_Gene_Transcript_Averaged_intensities.transposed.txt'
dtype: numpy type
precision (data type) for reading float values
ncols : int or None
number of columns (gene expression) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''])
df1 = df['CellLine']
df1 = df1.map(lambda x: x.replace('.', ':'))
df1.name = 'CELLNAME'
df2 = df.drop('CellLine', 1)
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(dtype)
df = pd.concat([df1, df2], axis=1)
return df
def load_cellline_mirna(path, dtype, ncols=None, scaling='std'):
"""Load cell line microRNA data, sub-select columns randomly if
    specified, scale the selected data and return a pandas
dataframe.
Parameters
----------
path: string
path to 'RNA__microRNA_OSU_V3_chip_log2.transposed.txt'
dtype: numpy type
precision (data type) for reading float values
ncols : int or None
number of columns to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''])
df1 = df['CellLine']
df1 = df1.map(lambda x: x.replace('.', ':'))
df1.name = 'CELLNAME'
df2 = df.drop('CellLine', 1)
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(dtype)
df = pd.concat([df1, df2], axis=1)
return df
def load_cellline_proteome(path, dtype, kinome_path=None, ncols=None, scaling='std'):
"""Load cell line microRNA data, sub-select columns randomly if
specificed, scale the selected data and return a pandas
dataframe.
Parameters
----------
path: string
path to 'nci60_proteome_log2.transposed.tsv'
dtype: numpy type
precision (data type) for reading float values
kinome_path: string or None (default None)
path to 'nci60_kinome_log2.transposed.tsv'
ncols : int or None
number of columns to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, sep='\t', engine='c')
df = df.set_index('CellLine')
if kinome_path:
df_k = pd.read_csv(kinome_path, sep='\t', engine='c')
df_k = df_k.set_index('CellLine')
df_k = df_k.add_suffix('.K')
df = df.merge(df_k, left_index=True, right_index=True)
index = df.index.map(lambda x: x.replace('.', ':'))
total = df.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df = df.iloc[:, usecols]
df = impute_and_scale(df, scaling)
df = df.astype(dtype)
df.index = index
df.index.names = ['CELLNAME']
df = df.reset_index()
return df
def load_drug_descriptors(path, dtype, ncols=None, scaling='std'):
"""Load drug descriptor data, sub-select columns of drugs descriptors
    randomly if specified, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
path: string
path to 'descriptors.2D-NSC.5dose.filtered.txt'
dtype: numpy type
precision (data type) for reading float values
ncols : int or None
number of columns (drugs descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''],
dtype=dtype,
converters ={'NAME' : str})
df1 = pd.DataFrame(df.loc[:,'NAME'])
df1.rename(columns={'NAME': 'NSC'}, inplace=True)
df2 = df.drop('NAME', 1)
# # Filter columns if requested
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:,usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(dtype)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
def load_drug_autoencoded(path, dtype, ncols=None, scaling='std'):
"""Load drug latent representation from autoencoder, sub-select
    columns of drugs randomly if specified, impute and scale the
selected data, and return a pandas dataframe.
Parameters
----------
path: string
path to 'Aspuru-Guzik_NSC_latent_representation_292D.csv'
dtype: numpy type
precision (data type) for reading float values
ncols : int or None
number of columns (drug latent representations) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, engine='c', converters ={'NSC' : str}, dtype=dtype)
df1 = pd.DataFrame(df.loc[:, 'NSC'])
df2 = df.drop('NSC', 1)
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(dtype)
df = pd.concat([df1, df2], axis=1)
return df
def load_dose_response(path, seed, dtype, min_logconc=-5., max_logconc=-5., subsample=None):
"""Load cell line response to different drug compounds, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
path: string
path to 'NCI60_dose_response_with_missing_z5_avg.csv'
seed: integer
seed for random generation
dtype: numpy type
precision (data type) for reading float values
min_logconc : -3, -4, -5, -6, -7, optional (default -5)
min log concentration of drug to return cell line growth
max_logconc : -3, -4, -5, -6, -7, optional (default -5)
max log concentration of drug to return cell line growth
subsample: None, 'naive_balancing' (default None)
subsampling strategy to use to balance the data based on growth
"""
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na','-',''],
dtype={'NSC':object, 'CELLNAME':str, 'LOG_CONCENTRATION':dtype, 'GROWTH':dtype})
df = df[(df['LOG_CONCENTRATION'] >= min_logconc) & (df['LOG_CONCENTRATION'] <= max_logconc)]
df = df[['NSC', 'CELLNAME', 'GROWTH', 'LOG_CONCENTRATION']]
if subsample and subsample == 'naive_balancing':
df1 = df[df['GROWTH'] <= 0]
df2 = df[(df['GROWTH'] > 0) & (df['GROWTH'] < 50)].sample(frac=0.7, random_state=seed)
df3 = df[(df['GROWTH'] >= 50) & (df['GROWTH'] <= 100)].sample(frac=0.18, random_state=seed)
df4 = df[df['GROWTH'] > 100].sample(frac=0.01, random_state=seed)
df = pd.concat([df1, df2, df3, df4])
df = df.set_index(['NSC'])
return df
def stage_data():
server = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
cell_expr_path = candle.fetch_file(server+'P1B3_cellline_expressions.tsv', 'Pilot1', untar=False)
cell_mrna_path = candle.fetch_file(server+'P1B3_cellline_mirna.tsv', 'Pilot1', untar=False)
cell_prot_path = candle.fetch_file(server+'P1B3_cellline_proteome.tsv', 'Pilot1', untar=False)
cell_kino_path = candle.fetch_file(server+'P1B3_cellline_kinome.tsv', 'Pilot1', untar=False)
drug_desc_path = candle.fetch_file(server+'P1B3_drug_descriptors.tsv', 'Pilot1', untar=False)
drug_auen_path = candle.fetch_file(server+'P1B3_drug_latent.csv', 'Pilot1', untar=False)
dose_resp_path = candle.fetch_file(server+'P1B3_dose_response.csv', 'Pilot1', untar=False)
test_cell_path = candle.fetch_file(server+'P1B3_test_celllines.txt', 'Pilot1', untar=False)
test_drug_path = candle.fetch_file(server+'P1B3_test_drugs.txt', 'Pilot1', untar=False)
return(cell_expr_path, cell_mrna_path, cell_prot_path, cell_kino_path,
drug_desc_path, drug_auen_path, dose_resp_path, test_cell_path,
test_drug_path)
class DataLoader(object):
"""Load merged drug response, drug descriptors and cell line essay data
"""
def __init__(self, seed, dtype, val_split=0.2, test_cell_split=None, shuffle=True,
cell_features=['expression'], drug_features=['descriptors'],
feature_subsample=None, scaling='std', scramble=False,
min_logconc=-5., max_logconc=-4., subsample='naive_balancing',
category_cutoffs=[0.]):
"""Initialize data merging drug response, drug descriptors and cell line essay.
Shuffle and split training and validation set
Parameters
----------
seed: integer
seed for random generation
dtype: numpy type
precision (data type) for reading float values
val_split : float, optional (default 0.2)
fraction of data to use in validation
test_cell_split : float or None, optional (default None)
fraction of cell lines to use in test; if None use predefined unseen cell lines instead of sampling cell lines used in training
shuffle : True or False, optional (default True)
if True shuffles the merged data before splitting training and validation sets
cell_features: list of strings from 'expression', 'mirna', 'proteome', 'all', 'categorical' (default ['expression'])
use one or more cell line feature sets: gene expression, microRNA, proteomics; or, use 'categorical' for one-hot encoded cell lines
drug_features: list of strings from 'descriptors', 'latent', 'all', 'noise' (default ['descriptors'])
use dragon7 descriptors, latent representations from Aspuru-Guzik's SMILES autoencoder trained on NSC drugs, or both; use random features if set to noise
feature_subsample: None or integer (default None)
number of feature columns to use from cellline expressions and drug descriptors
scaling: None, 'std', 'minmax' or 'maxabs' (default 'std')
            type of feature scaling: 'minmax' to [0,1], 'maxabs' to [-1,1], 'std' for standard normalization
scramble: True or False, optional (default False)
if True randomly shuffle dose response data as a control
min_logconc: float value between -3 and -7, optional (default -5.)
min log concentration of drug to return cell line growth
max_logconc: float value between -3 and -7, optional (default -4.)
max log concentration of drug to return cell line growth
subsample: 'naive_balancing' or None
if True balance dose response data with crude subsampling
category_cutoffs: list of floats (between -1 and +1) (default None)
            growth thresholds separating non-response and response categories
"""
cell_expr_path, cell_mrna_path, cell_prot_path, cell_kino_path,drug_desc_path, drug_auen_path, dose_resp_path, test_cell_path, test_drug_path = stage_data()
# Seed random generator for loading data
np.random.seed(seed)
df = load_dose_response(dose_resp_path, seed, dtype,
min_logconc=min_logconc, max_logconc=max_logconc, subsample=subsample)
logger.info('Loaded {} unique (D, CL) response sets.'.format(df.shape[0]))
# df[['GROWTH', 'LOG_CONCENTRATION']].to_csv('all.response.csv')
df = df.reset_index()
if 'all' in cell_features:
self.cell_features = ['expression', 'mirna', 'proteome']
else:
self.cell_features = cell_features
if 'all' in drug_features:
self.drug_features = ['descriptors', 'latent']
else:
self.drug_features = drug_features
self.input_shapes = collections.OrderedDict()
self.input_shapes['drug_concentration'] = (1,)
for fea in self.cell_features:
if fea == 'expression':
self.df_cell_expr = load_cellline_expressions(cell_expr_path, dtype, ncols=feature_subsample, scaling=scaling)
self.input_shapes['cell_expression'] = (self.df_cell_expr.shape[1] - 1,)
df = df.merge(self.df_cell_expr[['CELLNAME']], on='CELLNAME')
elif fea == 'mirna':
self.df_cell_mirna = load_cellline_mirna(cell_mrna_path, dtype, ncols=feature_subsample, scaling=scaling)
self.input_shapes['cell_microRNA'] = (self.df_cell_mirna.shape[1] - 1,)
df = df.merge(self.df_cell_mirna[['CELLNAME']], on='CELLNAME')
elif fea == 'proteome':
self.df_cell_prot = load_cellline_proteome(cell_prot_path, dtype, cell_kino_path, ncols=feature_subsample, scaling=scaling)
self.input_shapes['cell_proteome'] = (self.df_cell_prot.shape[1] - 1,)
df = df.merge(self.df_cell_prot[['CELLNAME']], on='CELLNAME')
elif fea == 'categorical':
df_cell_ids = df[['CELLNAME']].drop_duplicates()
cell_ids = df_cell_ids['CELLNAME'].map(lambda x: x.replace(':', '.'))
df_cell_cat = pd.get_dummies(cell_ids)
df_cell_cat.index = df_cell_ids['CELLNAME']
self.df_cell_cat = df_cell_cat.reset_index()
self.input_shapes['cell_categorical'] = (self.df_cell_cat.shape[1] - 1,)
for fea in self.drug_features:
if fea == 'descriptors':
self.df_drug_desc = load_drug_descriptors(drug_desc_path, dtype, ncols=feature_subsample, scaling=scaling)
self.input_shapes['drug_descriptors'] = (self.df_drug_desc.shape[1] - 1,)
df = df.merge(self.df_drug_desc[['NSC']], on='NSC')
elif fea == 'latent':
self.df_drug_auen = load_drug_autoencoded(drug_auen_path, dtype, ncols=feature_subsample, scaling=scaling)
self.input_shapes['drug_SMILES_latent'] = (self.df_drug_auen.shape[1] - 1,)
df = df.merge(self.df_drug_auen[['NSC']], on='NSC')
elif fea == 'noise':
df_drug_ids = df[['NSC']].drop_duplicates()
noise = np.random.normal(size=(df_drug_ids.shape[0], 500))
df_rand = pd.DataFrame(noise, index=df_drug_ids['NSC'],
columns=['RAND-{:03d}'.format(x) for x in range(500)])
self.df_drug_rand = df_rand.reset_index()
self.input_shapes['drug_random_vector'] = (self.df_drug_rand.shape[1] - 1,)
logger.debug('Filtered down to {} rows with matching information.'.format(df.shape[0]))
# df[['GROWTH', 'LOG_CONCENTRATION']].to_csv('filtered.response.csv')
df_test_cell = pd.read_csv(test_cell_path)
df_test_drug = pd.read_csv(test_drug_path, dtype={'NSC':object})
df_train_val = df[(~df['NSC'].isin(df_test_drug['NSC'])) & (~df['CELLNAME'].isin(df_test_cell['CELLNAME']))]
logger.debug('Combined train and validation set has {} rows'.format(df_train_val.shape[0]))
if test_cell_split and test_cell_split > 0:
df_test_cell = df_train_val[['CELLNAME']].drop_duplicates().sample(frac=test_cell_split, random_state=seed)
logger.debug('Use unseen drugs and a fraction of seen cell lines for testing: ' + ', '.join(sorted(list(df_test_cell['CELLNAME']))))
else:
logger.debug('Use unseen drugs and predefined unseen cell lines for testing: ' + ', '.join(sorted(list(df_test_cell['CELLNAME']))))
df_test = df.merge(df_test_cell, on='CELLNAME').merge(df_test_drug, on='NSC')
logger.debug('Test set has {} rows'.format(df_test.shape[0]))
if shuffle:
df_train_val = df_train_val.sample(frac=1.0, random_state=seed)
df_test = df_test.sample(frac=1.0, random_state=seed)
self.df_response = pd.concat([df_train_val, df_test]).reset_index(drop=True)
if scramble:
growth = self.df_response[['GROWTH']]
random_growth = growth.iloc[np.random.permutation(np.arange(growth.shape[0]))].reset_index()
self.df_response[['GROWTH']] = random_growth['GROWTH']
logger.warn('Randomly shuffled dose response growth values.')
logger.info('Distribution of dose response:')
logger.info(self.df_response[['GROWTH']].describe())
if category_cutoffs is not None:
growth = self.df_response['GROWTH']
classes = np.digitize(growth, category_cutoffs)
bc = np.bincount(classes)
min_g = np.min(growth) / 100
max_g = np.max(growth) / 100
logger.info('Category cutoffs: {}'.format(category_cutoffs))
logger.info('Dose response bin counts:')
for i, count in enumerate(bc):
lower = min_g if i == 0 else category_cutoffs[i-1]
upper = max_g if i == len(bc)-1 else category_cutoffs[i]
logger.info(' Class {}: {:7d} ({:.4f}) - between {:+.2f} and {:+.2f}'.
format(i, count, count/len(growth), lower, upper))
logger.info(' Total: {:9d}'.format(len(growth)))
self.total = df_train_val.shape[0]
self.n_test = df_test.shape[0]
self.n_val = int(self.total * val_split)
self.n_train = self.total - self.n_val
logger.info('Rows in train: {}, val: {}, test: {}'.format(self.n_train, self.n_val, self.n_test))
logger.info('Input features shapes:')
for k, v in self.input_shapes.items():
logger.info(' {}: {}'.format(k, v))
self.input_dim = sum([np.prod(x) for x in self.input_shapes.values()])
logger.info('Total input dimensions: {}'.format(self.input_dim))
class DataGenerator(object):
"""Generate training, validation or testing batches from loaded data
"""
def __init__(self, data, partition='train', batch_size=32, shape=None, concat=True, name='', cell_noise_sigma=None):
"""Initialize data
Parameters
----------
data: DataLoader object
loaded data object containing original data frames for molecular, drug and response data
partition: 'train', 'val', or 'test'
partition of data to generate for
batch_size: integer (default 32)
batch size of generated data
shape: None, '1d' or 'add_1d' (default None)
keep original feature shapes, make them flat or add one extra dimension (for convolution or locally connected layers in some frameworks)
concat: True or False (default True)
concatenate all features if set to True
cell_noise_sigma: float
            standard deviation of Gaussian noise to add to cell line features during training
"""
self.lock = threading.Lock()
self.data = data
self.partition = partition
self.batch_size = batch_size
self.shape = shape
self.concat = concat
self.name = name
self.cell_noise_sigma = cell_noise_sigma
if partition == 'train':
self.cycle = cycle(range(data.n_train))
self.num_data = data.n_train
elif partition == 'val':
self.cycle = cycle(range(data.total)[-data.n_val:])
self.num_data = data.n_val
elif partition == 'test':
self.cycle = cycle(range(data.total, data.total + data.n_test))
self.num_data = data.n_test
else:
raise Exception('Data partition "{}" not recognized.'.format(partition))
def flow(self):
"""Keep generating data batches
"""
while 1:
self.lock.acquire()
indices = list(islice(self.cycle, self.batch_size))
# print("\nProcess: {}, Batch indices start: {}".format(multiprocessing.current_process().name, indices[0]))
# logger.debug('Gen {} at index: {}'.format(self.name, indices[0]))
self.lock.release()
df = self.data.df_response.iloc[indices, :]
cell_column_beg = df.shape[1]
for fea in self.data.cell_features:
if fea == 'expression':
df = pd.merge(df, self.data.df_cell_expr, on='CELLNAME')
elif fea == 'mirna':
df = pd.merge(df, self.data.df_cell_mirna, on='CELLNAME')
elif fea == 'proteome':
df = pd.merge(df, self.data.df_cell_prot, on='CELLNAME')
elif fea == 'categorical':
df = pd.merge(df, self.data.df_cell_cat, on='CELLNAME')
cell_column_end = df.shape[1]
for fea in self.data.drug_features:
if fea == 'descriptors':
df = df.merge(self.data.df_drug_desc, on='NSC')
elif fea == 'latent':
df = df.merge(self.data.df_drug_auen, on='NSC')
elif fea == 'noise':
df = df.merge(self.data.df_drug_rand, on='NSC')
df = df.drop(['CELLNAME', 'NSC'], 1)
x = np.array(df.iloc[:, 1:])
if self.cell_noise_sigma:
c1 = cell_column_beg - 3
c2 = cell_column_end - 3
x[:, c1:c2] += np.random.randn(df.shape[0], c2-c1) * self.cell_noise_sigma
y = np.array(df.iloc[:, 0])
y = y / 100.
if self.concat:
if self.shape == 'add_1d':
yield x.reshape(x.shape + (1,)), y
else:
yield x, y
else:
x_list = []
index = 0
for v in self.data.input_shapes.values():
length = np.prod(v)
subset = x[:, index:index+length]
if self.shape == '1d':
reshape = (x.shape[0], length)
elif self.shape == 'add_1d':
reshape = (x.shape[0],) + v + (1,)
else:
reshape = (x.shape[0],) + v
x_list.append(subset.reshape(reshape))
index += length
yield x_list, y
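# Typical wiring of the two classes above (argument values are illustrative):
#   loader = DataLoader(seed=2017, dtype=np.float32)
#   train_gen = DataGenerator(loader, partition='train', batch_size=32).flow()
#   x_batch, y_batch = next(train_gen)
# Framework-specific baseline scripts are expected to drive this module in
# roughly this way.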
| mit |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-stats-johnsonsb-1.py | 1 | 1134 | from scipy.stats import johnsonsb
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
# Calculate a few first moments:
a, b = 4.32, 3.18
mean, var, skew, kurt = johnsonsb.stats(a, b, moments='mvsk')
# Display the probability density function (``pdf``):
x = np.linspace(johnsonsb.ppf(0.01, a, b),
johnsonsb.ppf(0.99, a, b), 100)
ax.plot(x, johnsonsb.pdf(x, a, b),
'r-', lw=5, alpha=0.6, label='johnsonsb pdf')
# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.
# Freeze the distribution and display the frozen ``pdf``:
rv = johnsonsb(a, b)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Check accuracy of ``cdf`` and ``ppf``:
vals = johnsonsb.ppf([0.001, 0.5, 0.999], a, b)
np.allclose([0.001, 0.5, 0.999], johnsonsb.cdf(vals, a, b))
# True
# Generate random numbers:
r = johnsonsb.rvs(a, b, size=1000)
# And compare the histogram:
ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
| gpl-2.0 |
mwjackson/heartnn | ffnn.py | 1 | 2242 | import csv
from pprint import pprint
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import ClassificationDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import TanhLayer, SoftmaxLayer
from pybrain.utilities import percentError
import random
from sklearn.metrics import precision_score, recall_score, confusion_matrix
ds = ClassificationDataSet(13, nb_classes=2, class_labels=['healthy', 'heart disease'])
with open('heart_data_norm.txt', 'r') as f:
reader = csv.reader(f, delimiter='\t')
next(reader, None) # skip header
rows = [r for r in reader]
random.shuffle(rows) # randomly shuffle the data
# inspect a row from file
pprint(rows[:1])
# add rows to dataset
for row in rows:
ds.appendLinked(row[:13], row[13:])
# convert from single bool, to 2 mutually exclusive categories ['not_heartdisease', 'heartdisease']
ds._convertToOneOfMany()
# check what we have
print ds.calculateStatistics()
print ds.getLinked(0)
# build network
# by default hidden layer is sigmoid
net = buildNetwork(ds.indim, 10, ds.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
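# i.e. 13 input features -> 10 tanh hidden units -> 2 softmax outputs (one per class)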
test_ds, train_ds = ds.splitWithProportion(0.15)
# create backprop trainer
trainer = BackpropTrainer(net, train_ds, learningrate=0.01, lrdecay=1.0, momentum=0.1, verbose=True, batchlearning=False, weightdecay=0.0)
# train until end
errors = trainer.trainUntilConvergence(maxEpochs=1000, verbose=True, continueEpochs=10, validationProportion=0.15)
train_result = trainer.testOnClassData()
test_result = trainer.testOnClassData(dataset=test_ds)
print 'total epochs: {0} training error: {1}% test error: {2}%'.format(
trainer.totalepochs, percentError(train_result, [0, 1]), percentError(test_result, [0, 1]))
predicated_values = test_result
actual_values = [int(v[1]) for v in list(test_ds['target'])] # convert categorical back to single var
print 'actual:'
print actual_values
print 'predicted:'
print predicated_values
print 'precision: {0}'.format(str(precision_score(actual_values, predicated_values)))
print 'recall: {0}'.format(str(recall_score(actual_values, predicated_values)))
print 'confusion matrix:'
print confusion_matrix(actual_values, predicated_values) | mit |
lin-credible/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are
the corresponding Mahalanobis distances. It is therefore better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
nelson-liu/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of
two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t components with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
joeyginorio/Action-Understanding-with-Rational-Rules | experiments/2/data/model_predictions.py | 1 | 17996 | import sys
sys.path.append('../../..')
from model_src.grid_world import GridWorld
from model_src.grid import Grid
from model_src.hypothesis import Hypothesis
from model_src.inference_machine import InferenceMachine
import numpy as np
import csv
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import collections
import sys
#################### Testing ############################
use = True
trials = [use for i in range(16)]
# trials[3] = True
trialOn = 0
if len(sys.argv) > 1:
trialOn = int(sys.argv[1])
trials[trialOn-1] = True
ra = [0,1]
rc = [0,1]
else:
sys.argv.append('-1')
sys.argv.append('-1')
sys.argv.append('-1')
ra = [0,1]
rc = [0,1]
#Wall
actions = [
#Single
[[6,6,6]],
[[6,6]],
#Double
[[6,6,6,6,'take',1,1,1]],
[[6,6,8,8,'take',5,5,5]],
#Triple
[[6,6,6,6,'take',2,2,2,2,'take',8,8,8]],
[[6,6,6,6,'take',1,1,1,1,'take',5,5,5]],
[[6,6,8,8,'take',5,5,5,5,'take',3,3,3]],
[[6,6,8,8,'take',0,0,0,0,'take',2,2,2]],
# No wall
#Single
[[6,6,6]],
[[6,6]],
#Double
[[6,6,6,6,'take',1,1,1]],
[[3,3,3,3,'take',5,5,5]],
#Triple
[[6,6,6,6,'take',2,2,2,2,'take',8,8,8]],
[[6,6,6,6,'take',1,1,1,1,'take',5,5,5]],
[[3,3,3,3,'take',5,5,5,5,'take',3,3,3]],
[[3,3,3,3,'take',0,0,0,0,'take',2,2,2]],
]
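# Each inner list is one observed action sequence: the integers appear to index
# grid-world moves (as defined in model_src/grid_world.py) and 'take' marks
# picking up the object the agent has reached.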
# chek = [
# ["'A'", "Or('A','B')", "Or('A','C')"],
# ["'B'", "Or('B',Then('B','A'))", "Or('B',Then('B','C'))"],
# ["Then('A','B')", "And('A','B')", "And('A',Or('B','C'))"],
# ["Then('B','A')", "Then('B',Or('A','C'))", "Then(Or('B','C'),'A')"],
# ["Then('A','C')", "And('A','C')", "Then(Or('A','B'),'C')"],
# ["Then('A','B','C')", "And('A','B','C')", "And('C',Then('A','B'))"],
# ["Then('A','C','B')", "And('B',Then('A','C'))", "Then(And('A','C'),'B')"],
# ["Then('B','A','C')", "Then('B',And('A','C'))", "Then(Or('B','C'),'A','C')"],
# ["Or('A','C')", "Or('A','B','C')", "Or('A','C',Then('B','A'))"],
# ["And('B',Or('A','C'))", "Then(Or('A','C'),'B')", "Then(Or('A','B','C'),'B')"],
# ["Then(Or('B','C'),'A')", "Or(And('A','C'),Then('B','A'))", "And('A',Or('B','C'))"],
# ["And('A','C')", "Or(And('A','C'),Then('B','C'))", "Or(And('A','C'),Then('B','A'))"],
# ["'A'", "Or('A','B')", "Or('A','C')"],
# ["Then('A','B')", "And('A','B')", "And('A',Or('B','C'))"],
# ["Then('B','A')", "Then('B',Or('A','C'))", "Then(Or('B','C'),'A')"],
# ["Then('A','C')", "And('A','C')", "Then(Or('A','B'),'C')"],
# ["Or(Then('A','C'),Then('B','A'))", "Or(And('A','C'),Then('B','A'))","And('A',Or('B','C'))"],
# ["Then('B',Or('A','C'))", "And('B',Or('A','C'))", "Then('B',Or('A','B','C'))"],
# ["Then(Or('A','B'),'C')", "Or(And('A','C'),Then('B','C'))", "And('C',Or('A','B'))"]
# ]
# chek = [['A', 'A or B', 'A or C', 'A and C', 'A and B', 'A then B', 'A then C'], ['B then A then C', 'B', 'B then C', 'B then A', 'B then C then A'], ['C', 'B', 'B then C', 'B then A', 'B then A then C', 'C then B', 'B then C then A'], ['C', 'B or C', 'C then A then B', 'B and C', 'C then B then A', 'C then A', 'C then B'], ['A and B and C', 'A and (B or C)', 'A then B then C', 'C and (A then B)', 'A and B', 'A then B'], ['(A and C) then B', 'A and C', 'A then C then B', 'B and (A then C)', 'A then C'], ['B then A', 'B then A then C', 'B then (A and C)', 'B then (A or C)', '(B or C) then A'], ['B then (A and C)', 'B then C', 'B and C', 'B then (A or C)', '(A or B) then C', 'B and (C then A)', 'B then C then A'], ['C then A then B', 'A and C', '(B or C) then A', 'C then (A or B)', 'C then A', 'B and (C then A)'], ['C then (A or B)', 'C then B then A', 'C then B', 'B and C', '(B and C) then A'], ['A and B and C', '(A and B) then C', 'A then B then C', 'C and (A then B)', 'A then (B and C)', 'A and (B then C)'], ['A then C then B', 'A and (C then B)', '(A and C) then B', 'B and (A then C)', 'B and (A and C)', 'A then (B and C)'], ['B then (A and C)', 'C and (B then A)', '(A and B) then C', 'B and (A then C)', 'B then A then C', 'B and (A and C)'], ['B and (A and C)', 'B then C then A', '(B and C) then A', 'B then (A and C)', 'B and (C then A)'], ['C and (A then B)', 'C then A then B', 'C then (A and B)', '(A and C) then B', 'B and (A and C)', 'B and (C then A)'], ['C then B then A', 'C and (B then A)', 'C then (A and B)', 'A and (C then B)', '(B and C) then A']]
# chek = [['B', 'B then C', 'B then A'], ['C', 'B', 'B then C'], ['B then C', 'B then C then A', 'B and C'], ['C then A', 'C then A then B', 'A and C'], ['B then A then C', 'B then (A and C)', 'C and (B then A)'], ['B then C then A', 'B then (A and C)', 'B and (C then A)'], ['C then A then B', 'B and (C then A)', '(A and C) then B'], ['C then B then A', 'C and (B then A)', 'C then (A and B)'],['B', 'B then C', 'B then A'], ['C', 'B', 'B then C'], ['B then C', 'B then C then A', 'B and C'], ['C then A', 'C then A then B', 'A and C'], ['B then A then C', 'B then (A and C)', 'C and (B then A)'], ['B then C then A', 'B then (A and C)', 'B and (C then A)'], ['C then A then B', 'B and (C then A)', '(A and C) then B'], ['C then B then A', 'C and (B then A)', 'C then (A and B)']]
# chek = [['B', 'B then C', 'B then A'], ['C', 'B', 'B then C'], ['B then C', 'B then C then A', 'B and C'], ['C then A', 'C then A then B', 'B and (C then A)'], ['B then A then C', 'B then (A and C)', '(A and B) then C'], ['B then C then A', 'B and (C then A)', 'B then (A and C)'], ['C then A then B', 'B and (C then A)', '(A and C) then B'], ['C then B then A', '(B and C) then A', 'C and (B then A)'], ['B', 'B then C', 'B then A'], ['B', 'B then C', 'B then A'], ['B then C', 'B then C then A', 'B then (A or C)'], ['C then A', 'C then A then B', 'A and C'], ['B then A then C', 'B then (A and C)', '(A and B) then C'], ['B then C then A', 'B then (A and C)', 'A and (B then C)'], ['C then A then B', 'B and (C then A)', '(A and C) then B'], ['C then B then A', '(B and C) then A', 'C and (B then A)']]
chek = \
[['B', 'A', 'C'],
['C', 'B', 'A'],
['B then C', 'B then A', 'B then C then A'],
['C then A', 'C then B', 'C then A then B'],
['B then A then C', 'B then (A and C)', '(A and B) then C'],
['B then C then A', 'B and (C then A)', 'B then (A and C)'],
['C then A then B', 'B and (C then A)', '(A and C) then B'],
['C then B then A', '(B and C) then A', 'C and (B then A)'],
['B', 'A', 'C'],
['B', 'A', 'C'],
['B then C', 'B then A', 'B then C then A'],
['C then A', 'C then B', 'C then A then B'],
['B then A then C', 'B then (A and C)', '(A and B) then C'],
['B then C then A', 'B then (A and C)', 'A and (B then C)'],
['C then A then B', 'B and (C then A)', '(A and C) then B'],
['C then B then A', '(B and C) then A', 'C and (B then A)']]
# chek = [['C', 'B', 'A'], ['C', 'B', 'A'], ['B then C', 'B then A', 'B then A then C'], ['C then B', 'C then A', 'C then B then A'], ['B then A then C', 'B and (A then C)', '(A and B) then C'], ['B then C then A', '(B and C) then A', 'A and (B then C)'], ['C then A then B', 'C and (A then B)', '(A and C) then B'], ['C then B then A', '(B and C) then A', 'C and (B then A)'], ['C', 'B', 'A'], ['C', 'B', 'A'], ['B then C', 'B then A', 'B then A then C'], ['C then B', 'C then A', 'C then B then A'], ['B then A then C', 'B and (A then C)', '(A and B) then C'], ['B then C then A', '(B and C) then A', 'A and (B then C)'], ['C then A then B', 'C and (A then B)', '(A and C) then B'], ['C then B then A', '(B and C) then A', 'C and (B then A)']]
# chek = [['C', 'B', 'A', 'A then C', 'B then C', 'B or C'], ['C', 'B', 'A', 'A then C', 'B then C', 'B or C'], ['B then C', 'B then A', 'B then A then C', 'B then C then A', 'B and C', 'A and B'], ['C then B', 'C then A', 'C then B then A', 'C then A then B', 'B and C', 'A and C'], ['B then A then C', 'B and (A then C)', '(A and B) then C', 'C and (B then A)', 'B then (A and C)', 'C or (B then A then C)'], ['B then C then A', '(B and C) then A', 'A and (B then C)', 'B and (C then A)', 'B then (A and C)', 'B or (B then C then A)'], ['C then A then B', 'C and (A then B)', '(A and C) then B', 'C then (A and B)', 'B and (C then A)', 'A or (C then A then B)'], ['C then B then A', '(B and C) then A', 'C and (B then A)', 'C then (A and B)', 'A and (C then B)', 'B or (C then B then A)'], ['C', 'B', 'A', 'A then C', 'B then C', 'B or C'], ['C', 'B', 'A', 'A then C', 'B then C', 'B or C'], ['B then C', 'B then A', 'B then A then C', 'B then C then A', 'B and C', 'A and B'], ['C then B', 'C then A', 'C then B then A', 'C then A then B', 'B and C', 'A and C'], ['B then A then C', 'B and (A then C)', '(A and B) then C', 'C and (B then A)', 'B then (A and C)', 'C or (B then A then C)'], ['B then C then A', '(B and C) then A', 'A and (B then C)', 'B and (C then A)', 'B then (A and C)', 'B or (B then C then A)'], ['C then A then B', 'C and (A then B)', '(A and C) then B', 'C then (A and B)', 'B and (C then A)', 'A or (C then A then B)'], ['C then B then A', '(B and C) then A', 'C and (B then A)', 'C then (A and B)', 'A and (C then B)', 'B or (C then B then A)']]
testGrid = Grid('grid_worlds/bookGrid.txt', True)
testGrid2 = Grid('grid_worlds/testGrid2.txt', True)
grid1 = [[testGrid] for i in range(8)]
grid2 = [[testGrid2] for i in range(8,17)]
grid = grid1 + grid2
start1 = [[20] for i in range(17)]
start = start1
#############################################################
def getDepthPosterior(posterior, primHypotheses, depth):
    # Combine the per-run posteriors (element-wise product), then, for each
    # depth from 2 to `depth`, sum the mass of hypotheses whose primitive
    # count falls below that depth's cutoff.
    currentPosterior = posterior[0]
    for i in range(1, len(posterior)):
        currentPosterior *= posterior[i]
    posterior = currentPosterior
    total = list()
    for i in range(2, depth+1):
        temp = 0
        for j in range(len(posterior)):
            if primHypotheses[j] < i+2:
                temp += posterior[j]
            else:
                break
        total.append(temp)
    return total
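# Example (sketch): this mirrors the commented-out CSV3 logging further below,
# where 'infer' is the InferenceMachine built inside the trial loop:
# depthTotals = getDepthPosterior(infer.posteriors, infer.primHypotheses, depth)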
H = Hypothesis(Grid('grid_worlds/testGrid.txt', True))
depth = 6
# H.sampleHypotheses(5000)
H.BFSampler(depth)
H.parseToEnglish()
# H.flattenAll()
oFile = open('model_results_exp2_'+sys.argv[1]+'_'+'.csv','w')
CSV = csv.writer(oFile)
# CSV.writerow(['Trial','Rational Action', 'Rational Choice', 'Hypothesis Rank',
# 'Hypothesis','Posterior'])
# oFile2 = open('parsed_english.csv','w')
# CSV2 = csv.writer(oFile2)
# oFile3 = open('marginalLikelihood'+'Trial'+sys.argv[1]+'.csv','w')
# CSV3 = csv.writer(oFile3)
# CSV3.writerow(['Trial','Model','Depth2','Depth3','Depth4','Depth5','Depth6'])
stimCounter = 1
lesion = ['RA/RC Lesion', 'RA Lesion','RC Lesion','Full Model']
cfull = list()
infers = list()
wall = [True]*8 + [False]*8
for trial in range(len(trials)):
if trials[trial]:
print '\n_______________'
print 'TRIAL ',trial
print '_______________'
# plt.rcParams.update({'font.size': 100})
# plt.rcParams['xtick.major.pad']='15'
# plt.rcParams['ytick.major.pad']='15'
# fig = plt.figure(figsize=(100,100))
# fig.suptitle('Experiment '+str(stimCounter),fontsize=300)
allMaxHyp = set()
allData = list()
allEval = list()
for i in ra:
for j in rc:
if i != j:
continue
if trials[trial]:
print '\n--------------------------------------------------------'
# print 'Test single_1:'
print 'Rational Action: ',i
print 'Rational Choice: ',j
print '\n'
infer = InferenceMachine(3, grid[trial], start[trial], actions[trial], tauChoice=1,
rationalAction=i, rationalChoice=j, hypotheses=H,MCMCOn=False,trials=trialOn)
infers.append(infer)
c = list()
for k in range(3):
# ind = infer.hypotheses.index(infer.maxHyp[k])
ind = H.englishHypotheses.index(chek[trial][k])
if i == 1 and j == 1:
CSV.writerow([trial+1,'Full Model',wall[trial], k+1,H.englishHypotheses[ind],infer.posteriors[-1][ind]])
c.append(H.englishHypotheses[ind])
elif i == 0 and j == 0:
CSV.writerow([trial+1,'Full Lesion', wall[trial],k+1,H.englishHypotheses[ind],infer.posteriors[-1][ind]])
if i ==1 and j==1:
cfull.append(c)
# allMaxHyp = set(infer.maxHypMCMC[0:3])`
# allData.append(infer.hypPosteriorMCMC)
# allEval.append(infer.evalHypMCMC)
# CSV
# OLD
# unblock below this line
# print '\n--------------------------------------------------------'
# # print 'Test single_1:'
# print 'Rational Action: ',i
# print 'Rational Choice: ',j
# print '\n'
# infer = InferenceMachine(3, grid[trial], start[trial], actions[trial], tauChoice=.01,
# rationalAction=i, rationalChoice=j, hypotheses=H,MCMCOn=False,trials=trialOn)
# for k in range(3):
# ind = infer.hypotheses.index(chek[trial][k])
# if i == 1 and j == 1:
# CSV.writerow([trial+1,'Full Model',k+1,H.englishHypotheses[ind],infer.posteriors[-1][ind]])
# elif i == 1 and j == 0:
# CSV.writerow([trial+1,'Alternate Model',k+1,H.englishHypotheses[ind],infer.posteriors[-1][ind]])
# # allMaxHyp = set(infer.maxHypMCMC[0:3])`
# # allData.append(infer.hypPosteriorMCMC)
# # allEval.append(infer.evalHypMCMC)
# # CSV3.writerow([stimCounter,lesion[i*2+1*j]] + getDepthPosterior(infer.posteriors, infer.primHypotheses, depth))
# allMaxHyp = chek[trial]
# allResults = list()
# if trials[trial]:
# for i in range(len(allData)):
# # allMaxHyp = list(allMaxHyp)
# results = dict()
# for h in allMaxHyp:
# if h in allData[i].keys():
# results.update({h:allData[i][h]})
# else:
# ht = h
# ht = ht.replace('And','H.And')
# ht = ht.replace('Or','H.Or')
# ht = ht.replace('Then','H.Then')
# evalH = eval(ht)
# check = [np.array_equal(evalH,c) for c in allEval[i]]
# if any(check):
# ind = check.index(True)
# # print ind
# results.update( {h: allData[i][allData[i].keys()[ind]] } )
# else:
# results.update({h:0.0})
# allResults.append(results)
# print 'here'
# checkH = h
# if 'H.' not in checkH:
# checkH = checkH.replace('Or','H.Or')
# checkH = checkH.replace('And','H.And')
# checkH = checkH.replace('Then','H.Then')
# check = [np.array_equal(eval(checkH), m) for m in infer.evalHypMCMC]
# if not any(check):
# results.update({h:0.0})
# else:
# hIndex = check.index(True)
# results.update({h:infer.hypPosteriorMCMC[infer.hypMCMC[hIndex]]})
# raise Exception
# hypList = sorted(results,key=results.get,reverse=True)
# results = list()
# for data in allData:
# temp = list()
# for h in hypList:
# try:
# temp.append(data[h])
# except:
# temp.append(0.0)
# results.append(temp)
# for i in range(len(results)):
# fig.subplots_adjust(bottom=.2)
# ax = fig.add_subplot(2,2,i+1)
# ax.spines['bottom'].set_linewidth(10)
# ax.spines['left'].set_linewidth(10)
# ax.spines['right'].set_linewidth(0)
# ax.spines['top'].set_linewidth(0)
# ax.set_title(lesion[i],fontweight='bold')
# width=0.8
# bins = map(lambda x: x-width/2,range(1,len(results[i])+1))
# ax.bar(bins,results[i],width=width)
# ax.set_xticks(map(lambda x: x, range(1,len(results[i])+1)))
# ax.set_xticklabels(hypList,rotation=45, rotation_mode="anchor", ha="right")
# fig.subplots_adjust(hspace=.8)
# plt.savefig('charts/experiment'+str(stimCounter), dpi=fig.dpi)
# plt.close('all')
# if len(results) > len(infer.hypotheses):
# temp = len(infer.hypotheses)
# else:
# temp = len(results[0])
# print 'allMAXHYP'
# print allMaxHyp
# englishMaxHyp = list()
# for h in allMaxHyp:
# h = h.replace('Then','H.T')
# h = h.replace('And','H.A')
# h = h.replace('Or','H.O')
# # print h
# hEval = eval(h)
# if hEval[0] == '(':
# hEval = hEval[1:len(hEval)-1]
# englishMaxHyp.append(hEval)
# # print englishMaxHyp
# for i in ra:
# for j in rc:
# for k in range(len(allMaxHyp)):
# if k < 3:
# # print englishMaxHyp
# CSV.writerow([stimCounter, True if i is 1 else False, True if j is 1 else False,
# englishMaxHyp[k],allResults[0][allMaxHyp[k]]])
# allResultsCopy = allResults
# allDataCopy = allData
# allMaxHypCopy = allMaxHyp
# except:
# aMaxHyp = allMaxHyp[k]
# if 'H.' not in aMaxHyp:
# aMaxHyp = aMaxHyp.replace('Or','H.Or')
# aMaxHyp = aMaxHyp.replace('And','H.And')
# aMaxHyp = aMaxHyp.replace('Then','H.Then')
# check = [np.array_equal(eval(aMaxHyp), m) for m in infer.evalHypMCMC]
# if not any(check):
# h = aMaxHyp
# evalH = eval(h)
# if evalH[0] == '(':
# evalH = evalH[1:len(evalH)-1]
# print 'Here'
# print evalH
# CSV.writerow([stimCounter, True if i is 1 else False, True if j is 1 else False, k+1,
# evalH,0.0])
# else:
# index = check.index(True)
# h = aMaxHyp
# h = h.replace('Or','O')
# h = h.replace('And','A')
# h = h.replace('Then','T')
# evalH = eval(h)
# if evalH[0] == '(':
# evalH = evalH[1:len(evalH)-1]
# # print infer.hypotheses[index]
# # print allData[i*2+1*j]
# try:
# CSV.writerow([stimCounter, True if i is 1 else False, True if j is 1 else False, k+1,
# evalH, allData[i*2+1*j][infer.hypMCMC[index]]])
# except KeyError:
# print 'HEREE'
# CSV.writerow([stimCounter, True if i is 1 else False, True if j is 1 else False, k+1,
# evalH, 0.0])
# elif k==temp-1:
# CSV.writerow([stimCounter, True if i is 1 else False, True if j is 1 else False, k+1,
# H.englishHypotheses[infer.hypotheses.index(hypList[k])],results[i*2+j*1][k]])
# englishReference = [infer.hypotheses.index(i) for i in hypList]
# englishHypotheses = [H.englishHypotheses[i] for i in englishReference]
# # temp = englishHypotheses[-1]
# # temp = '"' + temp + '"'
# englishHypotheses = ['"' + i + '"' for i in englishHypotheses[0:3]]
# # englishHypotheses.append(temp)
# englishHypotheses = [','.join(englishHypotheses)]
# CSV2.writerow([stimCounter]+englishHypotheses)
stimCounter += 1
oFile.close()
# oFile2.close()
# oFile3.close()
"""
Parents turned around, any effect?
Was it mentioned why parents peeking is a problem?
""" | mit |
tanmay987/deepLearning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
    """
    Display histogram of a TF distribution
    """
    with tf.Session() as sess:
        values = sess.run(distribution_tensor)
    plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
    plt.show()
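# Example usage (sketch, not part of the original helper): inspect a candidate
# initialization distribution before training with it.
# hist_dist('Truncated normal (stddev=0.1)',
#           tf.truncated_normal([1000], stddev=0.1))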
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
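# Example usage (sketch, not part of the original helper): compare two weight
# initializations on MNIST. The dataset object and the [784, 256, 128, 10]
# layer sizes are assumptions chosen to match the network in _get_loss_acc.
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# zeros = [tf.Variable(tf.zeros([784, 256])),
#          tf.Variable(tf.zeros([256, 128])),
#          tf.Variable(tf.zeros([128, 10]))]
# normal = [tf.Variable(tf.truncated_normal([784, 256], stddev=0.1)),
#           tf.Variable(tf.truncated_normal([256, 128], stddev=0.1)),
#           tf.Variable(tf.truncated_normal([128, 10], stddev=0.1))]
# compare_init_weights(mnist, 'Zeros vs. truncated normal',
#                      [(zeros, 'All zeros'), (normal, 'Truncated normal')])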
| mit |
wesm/ibis | ibis/impala/tests/test_metadata.py | 1 | 4536 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from numpy import nan
from ibis.compat import unittest
from ibis.impala.metadata import parse_metadata
def _glue_lists_spacer(spacer, lists):
result = list(lists[0])
for lst in lists[1:]:
result.append(spacer)
result.extend(lst)
return result
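# Example (sketch): _glue_lists_spacer(spacer, [[a, b], [c, d]]) returns
# [a, b, spacer, c, d]; the fixtures below use it to stitch the metadata
# sections into a single record list for pd.DataFrame.from_records.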
class TestMetadataParser(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.spacer = ('', nan, nan)
cls.schema = [
('# col_name', 'data_type', 'comment'),
cls.spacer,
('foo', 'int', nan),
('bar', 'tinyint', nan),
('baz', 'bigint', nan)
]
cls.partitions = [
('# Partition Information', nan, nan),
('# col_name', 'data_type', 'comment'),
cls.spacer,
('qux', 'bigint', nan)
]
cls.info = [
('# Detailed Table Information', nan, nan),
('Database:', 'tpcds', nan),
('Owner:', 'wesm', nan),
('CreateTime:', 'Sun Nov 08 01:09:42 PST 2015', nan),
('LastAccessTime:', 'UNKNOWN', nan),
('Protect Mode:', 'None', nan),
('Retention:', '0', nan),
('Location:', ('hdfs://host-name:20500/my.db'
'/dbname.table_name'), nan),
('Table Type:', 'EXTERNAL_TABLE', nan),
('Table Parameters:', nan, nan),
('', 'EXTERNAL', 'TRUE'),
('', 'STATS_GENERATED_VIA_STATS_TASK', 'true'),
('', 'numRows', '183592'),
('', 'transient_lastDdlTime', '1447340941'),
]
cls.storage_info = [
('# Storage Information', nan, nan),
('SerDe Library:', ('org.apache.hadoop'
'.hive.serde2.lazy.LazySimpleSerDe'), nan),
('InputFormat:', 'org.apache.hadoop.mapred.TextInputFormat', nan),
('OutputFormat:',
'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
nan),
('Compressed:', 'No', nan),
('Num Buckets:', '0', nan),
('Bucket Columns:', '[]', nan),
('Sort Columns:', '[]', nan),
('Storage Desc Params:', nan, nan),
('', 'field.delim', '|'),
('', 'serialization.format', '|')
]
cls.part_metadata = pd.DataFrame.from_records(
_glue_lists_spacer(cls.spacer, [cls.schema, cls.partitions,
cls.info, cls.storage_info]),
columns=['name', 'type', 'comment'])
cls.unpart_metadata = pd.DataFrame.from_records(
_glue_lists_spacer(cls.spacer, [cls.schema, cls.info,
cls.storage_info]),
columns=['name', 'type', 'comment'])
cls.parsed_part = parse_metadata(cls.part_metadata)
cls.parsed_unpart = parse_metadata(cls.unpart_metadata)
def test_table_params(self):
params = self.parsed_part.info['Table Parameters']
assert params['EXTERNAL'] is True
assert params['STATS_GENERATED_VIA_STATS_TASK'] is True
assert params['numRows'] == 183592
assert (params['transient_lastDdlTime'] ==
pd.Timestamp('2015-11-12 15:09:01'))
def test_partitions(self):
assert self.parsed_unpart.partitions is None
assert self.parsed_part.partitions == [('qux', 'bigint')]
def test_schema(self):
assert self.parsed_part.schema == [
('foo', 'int'),
('bar', 'tinyint'),
('baz', 'bigint')
]
def test_storage_info(self):
storage = self.parsed_part.storage
assert storage['Compressed'] is False
assert storage['Num Buckets'] == 0
def test_storage_params(self):
params = self.parsed_part.storage['Desc Params']
assert params['field.delim'] == '|'
assert params['serialization.format'] == '|'
| apache-2.0 |
itaiin/arrow | python/setup.py | 2 | 23194 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import glob
import os
import os.path as osp
import re
import shlex
import shutil
import sys
from Cython.Distutils import build_ext as _build_ext
import Cython
import pkg_resources
from setuptools import setup, Extension, Distribution
from os.path import join as pjoin
from distutils.command.clean import clean as _clean
from distutils.util import strtobool
from distutils import sysconfig
# Check if we're running 64-bit Python
is_64_bit = sys.maxsize > 2**32
if (pkg_resources.parse_version(Cython.__version__) <
        pkg_resources.parse_version('0.29')):
    raise Exception('Please upgrade to Cython 0.29 or newer')
setup_dir = os.path.abspath(os.path.dirname(__file__))
@contextlib.contextmanager
def changed_dir(dirname):
oldcwd = os.getcwd()
os.chdir(dirname)
try:
yield
finally:
os.chdir(oldcwd)
class clean(_clean):
def run(self):
_clean.run(self)
for x in []:
try:
os.remove(x)
except OSError:
pass
class build_ext(_build_ext):
_found_names = ()
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
self.extensions = [ext for ext in self.extensions
if ext.name != '__dummy__']
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
def run(self):
self._run_cmake()
_build_ext.run(self)
# adapted from cmake_build_ext in dynd-python
# github.com/libdynd/dynd-python
description = "Build the C-extensions for arrow"
user_options = ([('cmake-generator=', None, 'CMake generator'),
('extra-cmake-args=', None, 'extra arguments for CMake'),
('build-type=', None,
'build type (debug or release), default release'),
('boost-namespace=', None,
'namespace of boost (default: boost)'),
('with-cuda', None, 'build the Cuda extension'),
('with-flight', None, 'build the Flight extension'),
('with-parquet', None, 'build the Parquet extension'),
('with-static-parquet', None, 'link parquet statically'),
('with-static-boost', None, 'link boost statically'),
('with-plasma', None, 'build the Plasma extension'),
('with-tensorflow', None,
'build pyarrow with TensorFlow support'),
('with-orc', None, 'build the ORC extension'),
('with-gandiva', None, 'build the Gandiva extension'),
('generate-coverage', None,
'enable Cython code coverage'),
('bundle-boost', None,
'bundle the (shared) Boost libraries'),
('bundle-arrow-cpp', None,
'bundle the Arrow C++ libraries')] +
_build_ext.user_options)
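    # Example (sketch): the switches above can be supplied either as
    # environment variables (read in initialize_options below) or as
    # command-line flags to this command, e.g.
    #   PYARROW_WITH_PARQUET=1 python setup.py build_ext --inplace
    #   python setup.py build_ext --with-parquet --bundle-arrow-cpp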
def initialize_options(self):
_build_ext.initialize_options(self)
self.cmake_generator = os.environ.get('PYARROW_CMAKE_GENERATOR')
if not self.cmake_generator and sys.platform == 'win32':
self.cmake_generator = 'Visual Studio 14 2015 Win64'
self.extra_cmake_args = os.environ.get('PYARROW_CMAKE_OPTIONS', '')
self.build_type = os.environ.get('PYARROW_BUILD_TYPE',
'release').lower()
self.boost_namespace = os.environ.get('PYARROW_BOOST_NAMESPACE',
'boost')
self.cmake_cxxflags = os.environ.get('PYARROW_CXXFLAGS', '')
if sys.platform == 'win32':
# Cannot do debug builds in Windows unless Python itself is a debug
# build
if not hasattr(sys, 'gettotalrefcount'):
self.build_type = 'release'
self.with_cuda = strtobool(
os.environ.get('PYARROW_WITH_CUDA', '0'))
self.with_flight = strtobool(
os.environ.get('PYARROW_WITH_FLIGHT', '0'))
self.with_parquet = strtobool(
os.environ.get('PYARROW_WITH_PARQUET', '0'))
self.with_static_parquet = strtobool(
os.environ.get('PYARROW_WITH_STATIC_PARQUET', '0'))
self.with_static_boost = strtobool(
os.environ.get('PYARROW_WITH_STATIC_BOOST', '0'))
self.with_plasma = strtobool(
os.environ.get('PYARROW_WITH_PLASMA', '0'))
self.with_tensorflow = strtobool(
os.environ.get('PYARROW_WITH_TENSORFLOW', '0'))
self.with_orc = strtobool(
os.environ.get('PYARROW_WITH_ORC', '0'))
self.with_gandiva = strtobool(
os.environ.get('PYARROW_WITH_GANDIVA', '0'))
self.generate_coverage = strtobool(
os.environ.get('PYARROW_GENERATE_COVERAGE', '0'))
self.bundle_arrow_cpp = strtobool(
os.environ.get('PYARROW_BUNDLE_ARROW_CPP', '0'))
self.bundle_boost = strtobool(
os.environ.get('PYARROW_BUNDLE_BOOST', '0'))
CYTHON_MODULE_NAMES = [
'lib',
'_csv',
'_cuda',
'_flight',
'_parquet',
'_orc',
'_plasma',
'gandiva']
def _run_cmake(self):
# The directory containing this setup.py
source = osp.dirname(osp.abspath(__file__))
# The staging directory for the module being built
build_temp = pjoin(os.getcwd(), self.build_temp)
build_lib = os.path.join(os.getcwd(), self.build_lib)
saved_cwd = os.getcwd()
if not os.path.isdir(self.build_temp):
self.mkpath(self.build_temp)
# Change to the build directory
with changed_dir(self.build_temp):
# Detect if we built elsewhere
if os.path.isfile('CMakeCache.txt'):
cachefile = open('CMakeCache.txt', 'r')
cachedir = re.search('CMAKE_CACHEFILE_DIR:INTERNAL=(.*)',
cachefile.read()).group(1)
cachefile.close()
if (cachedir != build_temp):
return
static_lib_option = ''
cmake_options = [
'-DPYTHON_EXECUTABLE=%s' % sys.executable,
static_lib_option,
]
if self.cmake_generator:
cmake_options += ['-G', self.cmake_generator]
if self.with_cuda:
cmake_options.append('-DPYARROW_BUILD_CUDA=on')
if self.with_flight:
cmake_options.append('-DPYARROW_BUILD_FLIGHT=on')
if self.with_parquet:
cmake_options.append('-DPYARROW_BUILD_PARQUET=on')
if self.with_static_parquet:
cmake_options.append('-DPYARROW_PARQUET_USE_SHARED=off')
if not self.with_static_boost:
cmake_options.append('-DPYARROW_BOOST_USE_SHARED=on')
else:
cmake_options.append('-DPYARROW_BOOST_USE_SHARED=off')
if self.with_plasma:
cmake_options.append('-DPYARROW_BUILD_PLASMA=on')
if self.with_tensorflow:
cmake_options.append('-DPYARROW_USE_TENSORFLOW=on')
if self.with_orc:
cmake_options.append('-DPYARROW_BUILD_ORC=on')
if self.with_gandiva:
cmake_options.append('-DPYARROW_BUILD_GANDIVA=on')
if len(self.cmake_cxxflags) > 0:
cmake_options.append('-DPYARROW_CXXFLAGS={0}'
.format(self.cmake_cxxflags))
if self.generate_coverage:
cmake_options.append('-DPYARROW_GENERATE_COVERAGE=on')
if self.bundle_arrow_cpp:
cmake_options.append('-DPYARROW_BUNDLE_ARROW_CPP=ON')
# ARROW-1090: work around CMake rough edges
if 'ARROW_HOME' in os.environ and sys.platform != 'win32':
pkg_config = pjoin(os.environ['ARROW_HOME'], 'lib',
'pkgconfig')
os.environ['PKG_CONFIG_PATH'] = pkg_config
del os.environ['ARROW_HOME']
if self.bundle_boost:
cmake_options.append('-DPYARROW_BUNDLE_BOOST=ON')
cmake_options.append('-DCMAKE_BUILD_TYPE={0}'
.format(self.build_type.lower()))
if self.boost_namespace != 'boost':
cmake_options.append('-DBoost_NAMESPACE={}'
.format(self.boost_namespace))
extra_cmake_args = shlex.split(self.extra_cmake_args)
build_tool_args = []
if sys.platform == 'win32':
if not is_64_bit:
raise RuntimeError('Not supported on 32-bit Windows')
else:
build_tool_args.append('--')
if os.environ.get('PYARROW_BUILD_VERBOSE', '0') == '1':
cmake_options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')
if os.environ.get('PYARROW_PARALLEL'):
build_tool_args.append(
'-j{0}'.format(os.environ['PYARROW_PARALLEL']))
# Generate the build files
print("-- Running cmake for pyarrow")
self.spawn(['cmake'] + extra_cmake_args + cmake_options + [source])
print("-- Finished cmake for pyarrow")
# Do the build
print("-- Running cmake --build for pyarrow")
self.spawn(['cmake', '--build', '.', '--config', self.build_type]
+ build_tool_args)
print("-- Finished cmake --build for pyarrow")
if self.inplace:
# a bit hacky
build_lib = saved_cwd
# Move the libraries to the place expected by the Python build
try:
os.makedirs(pjoin(build_lib, 'pyarrow'))
except OSError:
pass
if sys.platform == 'win32':
build_prefix = ''
else:
build_prefix = self.build_type
print('Bundling includes: ' + pjoin(build_prefix, 'include'))
if os.path.exists(pjoin(build_lib, 'pyarrow', 'include')):
shutil.rmtree(pjoin(build_lib, 'pyarrow', 'include'))
shutil.move(pjoin(build_prefix, 'include'),
pjoin(build_lib, 'pyarrow'))
# Move the built C-extension to the place expected by the Python
# build
self._found_names = []
for name in self.CYTHON_MODULE_NAMES:
built_path = self.get_ext_built(name)
if not os.path.exists(built_path):
print('Did not find {0}'.format(built_path))
if self._failure_permitted(name):
print('Cython module {0} failure permitted'
.format(name))
continue
raise RuntimeError('pyarrow C-extension failed to build:',
os.path.abspath(built_path))
cpp_generated_path = self.get_ext_generated_cpp_source(name)
if not os.path.exists(cpp_generated_path):
raise RuntimeError('expected to find generated C++ file '
'in {0!r}'.format(cpp_generated_path))
# The destination path to move the generated C++ source to
# (for Cython source coverage)
cpp_path = pjoin(build_lib, self._get_build_dir(),
os.path.basename(cpp_generated_path))
if os.path.exists(cpp_path):
os.remove(cpp_path)
# The destination path to move the built C extension to
ext_path = pjoin(build_lib, self._get_cmake_ext_path(name))
if os.path.exists(ext_path):
os.remove(ext_path)
self.mkpath(os.path.dirname(ext_path))
print('Moving generated C++ source', cpp_generated_path,
'to build path', cpp_path)
shutil.move(cpp_generated_path, cpp_path)
print('Moving built C-extension', built_path,
'to build path', ext_path)
shutil.move(built_path, ext_path)
self._found_names.append(name)
if os.path.exists(self.get_ext_built_api_header(name)):
shutil.move(self.get_ext_built_api_header(name),
pjoin(os.path.dirname(ext_path),
name + '_api.h'))
if self.bundle_arrow_cpp:
print(pjoin(build_lib, 'pyarrow'))
move_shared_libs(build_prefix, build_lib, "arrow")
move_shared_libs(build_prefix, build_lib, "arrow_python")
if self.with_cuda:
move_shared_libs(build_prefix, build_lib, "arrow_gpu")
if self.with_flight:
move_shared_libs(build_prefix, build_lib, "arrow_flight")
if self.with_plasma:
move_shared_libs(build_prefix, build_lib, "plasma")
if self.with_gandiva:
move_shared_libs(build_prefix, build_lib, "gandiva")
if self.with_parquet and not self.with_static_parquet:
move_shared_libs(build_prefix, build_lib, "parquet")
if not self.with_static_boost and self.bundle_boost:
move_shared_libs(
build_prefix, build_lib,
"{}_filesystem".format(self.boost_namespace),
implib_required=False)
move_shared_libs(
build_prefix, build_lib,
"{}_system".format(self.boost_namespace),
implib_required=False)
move_shared_libs(
build_prefix, build_lib,
"{}_regex".format(self.boost_namespace),
implib_required=False)
if sys.platform == 'win32':
# zlib uses zlib.dll for Windows
zlib_lib_name = 'zlib'
move_shared_libs(build_prefix, build_lib, zlib_lib_name,
implib_required=False)
if self.with_plasma:
# Move the plasma store
source = os.path.join(self.build_type, "plasma_store_server")
target = os.path.join(build_lib,
self._get_build_dir(),
"plasma_store_server")
shutil.move(source, target)
def _failure_permitted(self, name):
if name == '_parquet' and not self.with_parquet:
return True
if name == '_plasma' and not self.with_plasma:
return True
if name == '_orc' and not self.with_orc:
return True
if name == '_flight' and not self.with_flight:
return True
if name == '_cuda' and not self.with_cuda:
return True
if name == 'gandiva' and not self.with_gandiva:
return True
return False
def _get_build_dir(self):
# Get the package directory from build_py
build_py = self.get_finalized_command('build_py')
return build_py.get_package_dir('pyarrow')
def _get_cmake_ext_path(self, name):
# This is the name of the arrow C-extension
suffix = sysconfig.get_config_var('EXT_SUFFIX')
if suffix is None:
suffix = sysconfig.get_config_var('SO')
filename = name + suffix
return pjoin(self._get_build_dir(), filename)
def get_ext_generated_cpp_source(self, name):
if sys.platform == 'win32':
head, tail = os.path.split(name)
return pjoin(head, tail + ".cpp")
else:
return pjoin(name + ".cpp")
def get_ext_built_api_header(self, name):
if sys.platform == 'win32':
head, tail = os.path.split(name)
return pjoin(head, tail + "_api.h")
else:
return pjoin(name + "_api.h")
def get_ext_built(self, name):
if sys.platform == 'win32':
head, tail = os.path.split(name)
suffix = sysconfig.get_config_var('SO')
# Visual Studio seems to differ from other generators in
# where it places output files.
if self.cmake_generator.startswith('Visual Studio'):
return pjoin(head, self.build_type, tail + suffix)
else:
return pjoin(head, tail + suffix)
else:
suffix = sysconfig.get_config_var('SO')
return pjoin(self.build_type, name + suffix)
def get_names(self):
return self._found_names
def get_outputs(self):
# Just the C extensions
# regular_exts = _build_ext.get_outputs(self)
return [self._get_cmake_ext_path(name)
for name in self.get_names()]
def move_shared_libs(build_prefix, build_lib, lib_name,
implib_required=True):
if sys.platform == 'win32':
# Move all .dll and .lib files
libs = [lib_name + '.dll']
if implib_required:
libs.append(lib_name + '.lib')
for filename in libs:
shutil.move(pjoin(build_prefix, filename),
pjoin(build_lib, 'pyarrow', filename))
else:
_move_shared_libs_unix(build_prefix, build_lib, lib_name)
def _move_shared_libs_unix(build_prefix, build_lib, lib_name):
shared_library_prefix = 'lib'
if sys.platform == 'darwin':
shared_library_suffix = '.dylib'
else:
shared_library_suffix = '.so'
lib_filename = (shared_library_prefix + lib_name +
shared_library_suffix)
# Also copy libraries with ABI/SO version suffix
if sys.platform == 'darwin':
lib_pattern = (shared_library_prefix + lib_name +
".*" + shared_library_suffix[1:])
libs = glob.glob(pjoin(build_prefix, lib_pattern))
else:
libs = glob.glob(pjoin(build_prefix, lib_filename) + '*')
if not libs:
raise Exception('Could not find library:' + lib_filename +
' in ' + build_prefix)
# Longest suffix library should be copied, all others symlinked
libs.sort(key=lambda s: -len(s))
print(libs, libs[0])
lib_filename = os.path.basename(libs[0])
shutil.move(pjoin(build_prefix, lib_filename),
pjoin(build_lib, 'pyarrow', lib_filename))
for lib in libs[1:]:
filename = os.path.basename(lib)
link_name = pjoin(build_lib, 'pyarrow', filename)
if not os.path.exists(link_name):
os.symlink(lib_filename, link_name)
# In the event of not running from a git clone (e.g. from a git archive
# or a Python sdist), see if we can set the version number ourselves
default_version = '0.12.0-SNAPSHOT'
if (not os.path.exists('../.git')
and not os.environ.get('SETUPTOOLS_SCM_PRETEND_VERSION')):
if os.path.exists('PKG-INFO'):
# We're probably in a Python sdist, setuptools_scm will handle fine
pass
else:
os.environ['SETUPTOOLS_SCM_PRETEND_VERSION'] = \
default_version.replace('-SNAPSHOT', 'a0')
# See https://github.com/pypa/setuptools_scm#configuration-parameters
scm_version_write_to_prefix = os.environ.get(
'SETUPTOOLS_SCM_VERSION_WRITE_TO_PREFIX', setup_dir)
def parse_git(root, **kwargs):
"""
Parse function for setuptools_scm that ignores tags for non-C++
subprojects, e.g. apache-arrow-js-XXX tags.
"""
from setuptools_scm.git import parse
kwargs['describe_command'] =\
'git describe --dirty --tags --long --match "apache-arrow-[0-9].*"'
return parse(root, **kwargs)
with open('README.md') as f:
long_description = f.read()
class BinaryDistribution(Distribution):
def has_ext_modules(foo):
return True
install_requires = (
'numpy >= 1.14',
'six >= 1.0.0',
'futures; python_version < "3.2"',
'enum34 >= 1.1.6; python_version < "3.4"',
)
# Only include pytest-runner in setup_requires if we're invoking tests
if {'pytest', 'test', 'ptr'}.intersection(sys.argv):
setup_requires = ['pytest-runner']
else:
setup_requires = []
setup(
name='pyarrow',
packages=['pyarrow', 'pyarrow.tests'],
zip_safe=False,
package_data={'pyarrow': ['*.pxd', '*.pyx', 'includes/*.pxd']},
include_package_data=True,
distclass=BinaryDistribution,
# Dummy extension to trigger build_ext
ext_modules=[Extension('__dummy__', sources=[])],
cmdclass={
'clean': clean,
'build_ext': build_ext
},
entry_points={
'console_scripts': [
'plasma_store = pyarrow:_plasma_store_entry_point'
]
},
use_scm_version={
'root': os.path.dirname(setup_dir),
'parse': parse_git,
'write_to': os.path.join(scm_version_write_to_prefix,
'pyarrow/_generated_version.py')
},
setup_requires=['setuptools_scm', 'cython >= 0.29'] + setup_requires,
install_requires=install_requires,
tests_require=['pytest', 'pandas', 'hypothesis',
'pathlib2; python_version < "3.4"'],
description='Python library for Apache Arrow',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
license='Apache License, Version 2.0',
maintainer='Apache Arrow Developers',
maintainer_email='[email protected]',
test_suite='pyarrow.tests',
url='https://arrow.apache.org/'
)
| apache-2.0 |
aerler/WRF-Projects | src/projects/GreatLakes/__init__.py | 1 | 3398 | '''
Created on 2016-04-13
A package that contains settings for the GreatLakes region projects for use with the geodata package.
@author: Andre R. Erler, GPL v3
'''
# import figure settings
from .figure_settings import getVariableSettings, getFigureSettings, figure_folder
from warnings import warn
# import map projection settings (basemap)
try:
from .map_settings import getSetup, map_folder
except IOError:
warn("Error importing map settings - possibly due to missing shape data.")
except ImportError:
warn("Error importing map settings - 'basemap' is likely not installed.")
# N.B.: apparently Basemap is not maintained anymore and dedent does not exist in matplotlib anymore...
# but replacing dedent with cleandoc as shown below in mpl_toolkits/basemap/proj.py and
# mpl_toolkits/basemap/__init__.py seems to do the trick
# try:
# from matplotlib.cbook import dedent
# except ImportError:
# from inspect import cleandoc as dedent
## import load functions with GreatLakes experiments into local namespace
try:
# import relevant WRF experiments
from .WRF_experiments import WRF_exps, WRF_ens
# import WRF load functions
from .WRF_experiments import loadWRF, loadWRF_Shp, loadWRF_Stn, loadWRF_TS, loadWRF_ShpTS, loadWRF_StnTS, loadWRF_Ensemble, loadWRF_ShpEns, loadWRF_StnEns
except (ImportError,IOError):
WRF_exps = None; WRF_ens = None
warn("Error importing WRF experiments.")
try:
# also load CESM experiments and functions
from projects.CESM_experiments import CESM_exps, CESM_ens
# import CESM load functions
from projects.CESM_experiments import loadCESM, loadCESM_Shp, loadCESM_Stn, loadCESM_TS, loadCESM_ShpTS, loadCESM_StnTS, loadCESM_Ensemble, loadCESM_ShpEns, loadCESM_StnEns
except (ImportError,IOError):
CESM_exps = None; CESM_ens = None
warn("Error importing CESM experiments.")
# add relevant experiments to general load functions
from datasets.common import loadDataset, loadEnsembleTS, addLoadFcts
try:
from datasets.Unity import loadUnity, loadUnity_Shp, loadUnity_Stn, loadUnity_ShpTS, loadUnity_StnTS # loadUnity_TS doesn't exist
except (ImportError,IOError):
warn("Error importing Unified Observational Dataset 'Unity'.")
unity_grid = 'glb1_d02' # Unified Dataset default grid
# N.B.: it is recommended to import Unity load fcts. from here
# modify functions (won't affect modified WRF/CESM functions)
addLoadFcts(locals(), locals(), unity_grid=unity_grid , WRF_exps=WRF_exps, WRF_ens=WRF_ens,
CESM_exps=CESM_exps, CESM_ens=CESM_ens)
## import shape dictionaries
# provinces, major basins and lakes etc.
try:
from projects.WSC_basins import basins, provinces, great_lakes, gauges # import the dicts with unique entries
except (ImportError,IOError):
warn("Error importing shape files and/or WSC module.")
# southern Ontario watersheds
try:
from .SON_settings import son_ws_names, son_watersheds
except (ImportError,IOError):
warn("Error importing shape files from SON module.")
# import figure with hydro settings
from .analysis_settings import loadStationEnsemble, loadShapeEnsemble, loadShapeObservations # load datasets
from .analysis_settings import loadStationFit, loadShapeFit
from .analysis_settings import exps_rc, variables_rc, constraints_rc
from .analysis_settings import climFigAx, climPlot, evaFigAx, distPlot, quantPlot # plotting
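# Example (sketch; the loader signature follows common geodata/WRF Tools
# conventions and the argument names here are assumptions, not defined in
# this module):
# from projects.GreatLakes import loadWRF_StnTS
# wrf_stn = loadWRF_StnTS(experiment='<WRF experiment>',
#                         station='<station dataset>', varlist=['precip', 'T2'])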
| gpl-3.0 |
shenzebang/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
            y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
fabioticconi/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 25 | 25114 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
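# Example (sketch): the tests below consume this helper as, e.g.,
# X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100)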
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_) -
np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=50, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 10, 3), clf.mse_path_.shape)
assert_equal((2, 10), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((10, 3), clf.mse_path_.shape)
assert_equal(10, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong dtype,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in incorrect
    # computation
X = check_array(X, order='C', dtype='float64')
assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
def test_lasso_non_float_y():
X = [[0, 0], [1, 1], [-1, -1]]
y = [0, 1, 2]
y_float = [0.0, 1.0, 2.0]
for model in [ElasticNet, Lasso]:
clf = model(fit_intercept=False)
clf.fit(X, y)
clf_float = model(fit_intercept=False)
clf_float.fit(X, y_float)
assert_array_equal(clf.coef_, clf_float.coef_)
| bsd-3-clause |
ProkopHapala/SimpleSimulationEngine | cpp/sketches_SDL/Molecular/python/CLCFGO_coulomb_derivs.py | 1 | 7802 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spc
'''
We have orbitals
Phi_1 = c_a * chi_a + c_b * chi_b
Phi_2 = c_c * chi_c + c_d * chi_d
where chi_i are gaussian type basis functions
and c_i are expansion coefficients
The electron density of molecular orbital Rho_1 = <phi_1|phi_1>
can be expressed using auxiliary gaussian basis functions
rho_ab = chi_a * chi_b
Rho_1  = sum_ab c_a*c_b*S_ab * rho_ab
       = sum_ab q_ab * rho_ab
where q_ab = c_a*c_b*S_ab is the charge of the auxiliary electron blob,
with S_ab being the overlap integral between the basis functions chi_a and chi_b.
We can use the collective indices i=ab and j=cd
qi = Sab*ca*cb
qj = Scd*cc*cd
The repulsion between blobs qi,qj can be expressed as
qi*qj / =
'''
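# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Shows how an auxiliary blob charge q_ab = c_a*c_b*S_ab could be evaluated with
# the overlap helper product3D_s_deriv defined further below; the function name
# _example_blob_charge is hypothetical.
def _example_blob_charge( ca, sa, xa, cb, sb, xb ):
    Sab = product3D_s_deriv( sa, xa, sb, xb )[0]   # overlap S_ab = <chi_a|chi_b>
    return ca*cb*Sab                               # charge q_ab of the auxiliary blob rho_ab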
const_hbar_SI = 1.054571817e-34; #< [J.s] #6.582119569e-16 # [eV/s]
const_Me_SI = 9.10938356e-31; #< [kg]
const_e_SI = 1.602176620898e-19; #< [Coulomb]
const_eps0_SI = 8.854187812813e-12; #< [F.m = Coulomb/(Volt*m)]
const_eV_SI = 1.602176620898e-19; #< [J]
const_Angstroem_SI = 1.0e-10;
const_K_SI = const_hbar_SI*const_hbar_SI/const_Me_SI;
const_El_SI = const_e_SI*const_e_SI/(4.*np.pi*const_eps0_SI);
const_Ry_SI = 0.5 * const_El_SI*const_El_SI/const_K_SI;
const_Ry_eV = 13.6056925944;
const_El_eVA = const_El_SI/( const_e_SI*const_Angstroem_SI );
const_K_eVA = (const_El_eVA*const_El_eVA)/(2*const_Ry_eV);
const_Ke_eVA = const_K_eVA*1.5;
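# Numerically, const_El_eVA ~ 14.40 eV*Angstrom (Coulomb constant e^2/(4*pi*eps0))
# and const_K_eVA ~ 7.62 eV*Angstrom^2 (hbar^2/m_e), as derived from the SI values above.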
def Coulomb( r, s ):
'''
double ir = 1./r; //(r+1.e-8);
double is = 1./s; //(s+1.e-8);
double r_s = r*is;
double r_2s = M_SQRT1_2 * r_s; // This is for charge-density blobs (assuming si,sj comes from charge denisty)
//double r_2s = r_s;
//double r_2s = M_SQRT2 * r_s; // This is for wavefunction blobs (assuming si,sj comes from wavefunction)
double e1 = ir * const_El_eVA;
double e2 = erf( r_2s ); // ToDo : this should be possible to compute together !!!
double g = exp( -r_2s*r_2s ) * const_F2;
double f1 = -e1*ir;
double f2 = g*is*0.5;
double e1f2 = e1*f2;
fr = (f1*e2 + e1f2)*ir;
fs = e1f2 *r_s * is;
return e1 * e2;
'''
# ToDo: maybe we can do without s=sqrt(s2) and r=sqrt(r2)
#constexpr const double const_F2 = -2.*sqrt(2./np.pi);
#const_F2 = M_2_SQRTPI * M_SQRT2;
M_SQRT2 = 1.41421356237
M_SQRT1_2 = 1/M_SQRT2
const_F2 = 2*np.sqrt(2/np.pi)
ir = 1./r #(r+1.e-8);
is_ = 1./s #(s+1.e-8);
r_s = r*is_
r_2s = M_SQRT1_2 * r_s; # This is for charge-density blobs (assuming si,sj comes from charge denisty)
#r_2s = r_s;
#r_2s = M_SQRT2 * r_s; # This is for wavefunction blobs (assuming si,sj comes from wavefunction)
e1 = ir * const_El_eVA
e2 = spc.erf( r_2s )
g = np.exp( -r_2s*r_2s ) * const_F2
f1 = -e1*ir
#f2 = g*is_ # This is for wavefunction blobs (assuming si,sj comes from wavefunction)
f2 = g*is_*0.5 # This is for charge-density blobs (assuming si,sj comes from charge denisty)
e1f2 = e1*f2
fr = (f1*e2 + e1f2)*ir
fs = e1f2 *r_s * is_
E = e1 * e2
#for i in range(len(r)):
# print "Gauss::Coulomb r %g s %g E %g fr %g " %(r[i],s, E[i], fr[i] )
return E,fr,fs
def product3D_s_deriv( si,pi, sj,pj ):
''' returns
S, p,
dSsi, dSsj,
dXsi, dXsj,
dXxi, dXxj,
dCsi, dCsj, dCr
'''
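    # Gaussian product theorem: the product of two Gaussian blobs is again a Gaussian,
    # with width s = si*sj/sqrt(si^2+sj^2) and center p the average of pi and pj
    # weighted by the other blob's squared width (both computed below).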
si2 = si*si
sj2 = sj*sj
s2 = si2 + sj2
is2 = 1/s2
is4 = is2*is2
sqrtis2 = np.sqrt(is2)
s = si*sj*sqrtis2 # size
p = pj*(si2*is2) + pi*(sj2*is2) # position
#X = ( si2*xj + sj2*xi )*inv;
inv3_2 = sqrtis2*is2
dSsi = sj*sj2*inv3_2
dSsj = si*si2*inv3_2
dp = pi-pj
dXsi = dp*(-2*si*sj2*is4)
dXsj = dp*( 2*sj*si2*is4)
dXxi = sj2*is2
dXxj = si2*is2
#r2 = dp.norm2()
r2 = dp*dp
a2 = 2.*(si*sj)*is2
a = np.sqrt(a2)
e1 = a2*a
e2 = np.exp( -r2*is2 )
f1 = 3.*a * (si2-sj2)*is4
f2 = 2.*e2 * r2*is4
dCsi = e1*f2*si - e2*f1*sj
dCsj = e1*f2*sj + e2*f1*si
C = e1*e2 # Overlap
dCr = C*(-2.*is2) # derivative is correct, tested !
# TODO : How is it possible that derivative is the same as value (just rescaled) ????
#double logC = wxi*xi + wxj*xj - wx*X;
#double C = np.exp(-logC) * Ci * Cj
#try:
# for i in range(len(r2)):
# print "product3D_s_deriv r %g s %g S %g dS %g " %(np.sqrt(r2[i]),s, S[i], dCr[i] )
#except:
# pass
return C,s,p, dCr*dp, (dSsi,dXsi,dXxi,dCsi), (dSsj,dXsj,dXxj,dCsj)
def checkNumDeriv( x, func, dfunc, name ):
dy = dfunc( x )
y = func(x)
dynum,xnum = numDeriv( x, y )
#print y
#print "y.shape, ynum.shape ", y.shape, ynum.shape
plotVsNum( x,dy,dynum, name )
plt.plot(x, y,'-.', label=name+"_F" )
if __name__ == "__main__":
#s = np.arange( 0.1, 5.0, 0.05 )
#rs = np.arange( 0.1, 5.0, 0.05 )
#S = np.arange( 1.25, 5.0, 0.05 )
#r = 1.5 + 0.*s
ca = 1.0
cb = 1.0
cc = 1.0
cd = 1.0
sa = 1.0
sb = 1.0
sc = 1.0
sd = 1.0
dx = 0.1
xa = np.arange( 0.01, 3.0, dx )
xb = 0.0
xc = -1.5
xd = 0.0
xs_ = (xa[1:]+xa[:-1])*0.5
# overlaps
Sab, si, xab, dQab, dA, dB = product3D_s_deriv( sa,xa, sb,xb )
Scd, sj, xcd, dQcd, dC, dD = product3D_s_deriv( sc,xc, sd,xd )
# coulomb
s2 = si*si + sj*sj
s = np.sqrt(s2)
r = xab-xcd
e, fx, fs = Coulomb( r, s )
dXxi = dA[2] + xa*0
plt.plot( xa, Sab , label='Sab' )
plt.plot( xa, r , label='r' )
#plt.plot( xa, dQab, label='dSab_ana' )
#plt.plot( xs_, (Sab[1:]-Sab[:-1])/dx,':', label='dSab_num' )
qij = 4*Scd*Sab
#qij = Sab
dQij = 4*Scd*dQab
# Q: Why we dont need derivatives of charge ????
#Fx = -fx*0.5*dA[1] # This works for zero initial distance between blobs
Fx = fx*r*dXxi
Fpi = fx*r*qij # see
fxi = Fpi*dXxi
print "Scd, 4*Scd ", Scd, 4*Scd
print "For some reason each charge is scaled by 2.0"
E = e*qij
F = fxi + e*dQij # total derivtive F = dE/dx = d(e*qi)/dx
# Note: e,fx=de/dx are NOT multiplied by charge Qij
# Total force Fx = dE/dx = d(e*q)/dx = q*(de/dx) + e*(dq/dx)
for i in range(len(r)):
#print "Gauss::Coulomb r %g s %g E %g Fx %g fx %g " %(r[i], s, E[i], Fx[i], fx[i] )
#print "fromRho r %g s %g E %g Fx %g fx %g " %((xa-xb)[i], s, E[i], Fx[i], fx[i] )
#print "CoublombElement r %g s %g E %g fr %g qij %g frq %g fij %g" %((xa-xb)[i], s, e[i], fx[i], qij[i], (fx*qij)[i], (fx*qij*r)[i] )
#print "fromRho r %g s %g | E %g e %g qij %g(%g) | Fx %g Fpi %g dQij %g " %((xa-xb)[i], si, E[i],e[i]*2*Scd,qij[i],Sab[i], Fx[i], Fpi[i],dQij[i] )
print "fromRho r %g Eqi %g Cij %g | Fpi %g dXxi %g fxi %g Fxi %g " %((xa-xb)[i], e[i]*2*Scd, Sab[i], Fpi[i], dXxi[i], fxi[i], F[i] );
pass
# ==== Derivative of Coulomb term without considering changes of Charges
#plt.plot( xa, e , label='e' )
#plt.plot( xa, Fx, label='dedx_ana' )
#plt.plot( xs_, (e[1:]-e[:-1])/dx,':', label='dedx_num' )
# ==== Derivative of Coulomb term with considering the Charges
plt.plot( xa, E, label='E' )
plt.plot( xa, F, label='dEdx_ana' )
plt.plot( xs_, (E[1:]-E[:-1])/dx,':', label='dEdx_num', lw=3 )
plt.plot( xa, fxi, label='fxi' )
#plt.plot( xa, fx, label='fx' )
#plt.plot( xa, dXxi, label='dXxi' )
plt.grid()
plt.legend()
plt.show()
| mit |
musically-ut/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead, or import individual functions as needed, e.g.
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
pass
| bsd-3-clause |
subutai/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkagg.py | 70 | 4184 | """
Render to gtk from agg
"""
from __future__ import division
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, NavigationToolbar, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
else:
toolbar = None
return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_gtkagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKAgg(thisFig)
    if DEBUG: print 'backend_gtkagg.new_figure_manager done'
    return FigureManagerGTKAgg(canvas, num)
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(FigureCanvasAgg.filetypes)
def configure_event(self, widget, event=None):
if DEBUG: print 'FigureCanvasGTKAgg.configure_event'
if widget.window is None:
return
try:
del self.renderer
except AttributeError:
pass
w,h = widget.window.get_size()
if w==1 or h==1: return # empty fig
# compute desired figure size in inches
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
self._need_redraw = True
self.resize_event()
if DEBUG: print 'FigureCanvasGTKAgg.configure_event end'
return True
def _render_figure(self, pixmap, width, height):
if DEBUG: print 'FigureCanvasGTKAgg.render_figure'
FigureCanvasAgg.draw(self)
if DEBUG: print 'FigureCanvasGTKAgg.render_figure pixmap', pixmap
#agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
buf = self.buffer_rgba(0,0)
ren = self.get_renderer()
w = int(ren.width)
h = int(ren.height)
pixbuf = gtk.gdk.pixbuf_new_from_data(
buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
gtk.gdk.RGB_DITHER_NONE, 0, 0)
if DEBUG: print 'FigureCanvasGTKAgg.render_figure done'
def blit(self, bbox=None):
if DEBUG: print 'FigureCanvasGTKAgg.blit'
if DEBUG: print 'FigureCanvasGTKAgg.blit', self._pixmap
agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
x, y, w, h = self.allocation
self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
0, 0, 0, 0, w, h)
if DEBUG: print 'FigureCanvasGTKAgg.done'
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
| agpl-3.0 |
shusenl/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
| bsd-3-clause |
UASLab/ImageAnalysis | scripts/4c-surface-outliers1.py | 1 | 4820 | #!/usr/bin/env python3
import argparse
import commands
import cPickle as pickle
import cv2
import fnmatch
import itertools
#import json
import math
import matplotlib.pyplot as plt
import numpy as np
import os.path
import scipy.spatial
import sys
sys.path.append('../lib')
import project
import transformations
def meta_stats(report):
sum = 0.0
count = len(report)
for line in report:
value = line[0]
sum += value
average = sum / len(report)
print "average value = %.2f" % (average)
sum = 0.0
for line in report:
value = line[0]
diff = average - value
sum += diff**2
stddev = math.sqrt(sum / count)
print "standard deviation = %.2f" % (stddev)
return average, stddev
parser = argparse.ArgumentParser(description='Compute Delauney triangulation of matches.')
parser.add_argument('project', help='project directory')
parser.add_argument('--stddev', default=5, type=int, help='standard dev threshold')
args = parser.parse_args()
proj = project.ProjectMgr(args.project)
proj.load_image_info()
print "Loading original (direct) matches ..."
matches_direct = pickle.load( open( args.project + "/matches_direct", "rb" ) )
print "Loading fitted (sba) matches..."
matches_sba = pickle.load( open( args.project + "/matches_sba", "rb" ) )
# custom slope routine
def my_slope(p1, p2, z1, z2):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
dz = z2 - z1
hdist = math.sqrt(dx**2 + dy**2)
if hdist > 0.00001:
slope = dz / hdist
else:
slope = 0
return slope
def compute_surface_outliers():
# iterate through the sba match dictionary and build a list of feature
# points and heights (in x=east,y=north,z=up coordinates)
print "Building Delaunay triangulation..."
raw_points = []
raw_values = []
sum_values = 0.0
for match in matches_sba:
ned = match[0]
raw_points.append( [ned[1], ned[0]] )
raw_values.append( -ned[2] )
sum_values += -ned[2]
avg_height = sum_values / len(matches_sba)
print "Average elevation = %.1f" % ( avg_height )
tri = scipy.spatial.Delaunay(np.array(raw_points))
# look for outliers by comparing depth of a point with the average
    # depth of its neighbors. Outliers will tend to stick out this way
# (although you could be looking at the top of a flag pole so it's not
# a guarantee of a bad value.)
print "raw points =", len(raw_points)
print "tri points =", len(tri.points)
print "neighbors:", len(tri.vertex_neighbor_vertices[0]), len(tri.vertex_neighbor_vertices[1])
#print "neighbor[0]:\n", tri.vertex_neighbor_vertices[0][0], tri.vertex_neighbor_vertices[0][1]
print "Computing average slope to neighbors..."
indices, indptr = tri.vertex_neighbor_vertices
report = []
x = []; y = []; slope = []
for i in range(len(tri.points)):
pi = raw_points[i]
zi = raw_values[i]
sum_slope = 0.0
neighbors = indptr[indices[i]:indices[i+1]]
if len(neighbors) == 0:
continue
# print neighbors
for j in neighbors:
pj = raw_points[j]
zj = raw_values[j]
sum_slope += my_slope(pi, pj, zi, zj)
avg_slope = sum_slope / len(neighbors)
# print i, avg_slope
report.append( (avg_slope, i) )
x.append(raw_points[i][0])
y.append(raw_points[i][1])
slope.append(avg_slope)
# plot results
do_plot = False
if do_plot:
x = np.array(x)
y = np.array(y)
slope_diff = np.array(slope)
plt.scatter(x, y, c=slope)
plt.show()
average, stddev = meta_stats(report)
report = sorted(report, key=lambda fields: abs(fields[0]), reverse=True)
delete_list = []
for line in report:
slope = line[0]
index = line[1]
if abs(average - slope) >= args.stddev * stddev:
print "index=", index, "slope=", slope
delete_list.append( index )
delete_list = sorted(delete_list, reverse=True)
for index in delete_list:
#print "deleting", index
matches_direct.pop(index)
matches_sba.pop(index)
return len(delete_list)
deleted_sum = 0
result = compute_surface_outliers()
while result > 0:
deleted_sum += result
result = compute_surface_outliers()
if deleted_sum > 0:
result=raw_input('Remove ' + str(deleted_sum) + ' outliers from the original matches? (y/n):')
if result == 'y' or result == 'Y':
# write out the updated match dictionaries
print "Writing original matches..."
pickle.dump(matches_direct, open(args.project+"/matches_direct", "wb"))
print "Writing sba matches..."
pickle.dump(matches_sba, open(args.project+"/matches_sba", "wb"))
| mit |
sarahgrogan/scikit-learn | sklearn/tree/export.py | 78 | 15814 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
yyjiang/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
pixki/redesestocasticas | phasedist.py | 1 | 6701 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: pixki
# @Date: 2015-11-11 12:07:40
# @Last Modified by: jairo
# @Last Modified time: 2015-12-10 22:29:09
import numpy as np
from scipy.stats import expon, erlang, rv_continuous
import matplotlib.pyplot as plt
import argparse
import sys
import numpy.random as mtrand
class hyperexp(rv_continuous):
"""An HyperExponential Random Variable
"""
def __init__(self, alpha=0.5, lambda1=1.0, lambda2=1.0):
self.alpha = alpha
self.lambda1 = lambda1
self.lambda2 = lambda2
def rvs(self, size=1):
vsample = np.vectorize(self._single_sample)
return np.fromfunction(vsample, (size,))
def _single_sample(self, size):
U1 = mtrand.random()
if U1 <= self.alpha:
scale = self.lambda1
else:
scale = self.lambda2
U2 = mtrand.random()
return -np.log(U2)/scale
def pdf(self, x):
a = self.alpha*self.lambda1*np.exp(self.lambda1*-x)
b = (1-self.alpha)*self.lambda2*np.exp(self.lambda2*-x)
return a + b
def mean(self):
return (self.alpha / self.lambda1) + ((1-self.alpha) / self.lambda2)
def standard_dev(self):
a = (self.alpha/(self.lambda1**2)) + ((1-self.alpha)/(self.lambda2**2))
return np.sqrt(2*a + self.mean()**2)
def cdf(self, x):
a = self.alpha*(-np.exp(self.lambda1*-x))
b = (1-self.alpha)*(-np.exp(self.lambda2*-x))
return a + b + 1
def CoV(self):
a = np.sqrt(2*self.alpha/self.lambda1 + 2*(1-self.alpha)/self.lambda2 -
(self.alpha/self.lambda1 + (1-self.alpha)/self.lambda2)**2)
return a/self.mean()
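# Illustrative usage sketch (added for clarity; the parameter values are arbitrary examples):
#   rv = hyperexp(alpha=0.4, lambda1=2.0, lambda2=0.5)
#   samples = rv.rvs(size=10000)
#   print rv.mean(), rv.CoV()   # analytic moments to compare against the samples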
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--stages', type=int, required=False,
                        help='Stages of the distribution')
parser.add_argument('-l', '--lambdap', type=float, required=True,
nargs='+',
                        help='Lambda parameter of each distribution')
parser.add_argument('-r', '--runs', type=int, required=True,
                        help='Runs to perform for each simulation')
parser.add_argument('-o', '--output', type=str, required=False,
                        help='Output file for the plot')
parser.add_argument('-d', '--dist', type=str, required=True,
choices=['erlang', 'expon', 'hyperexp'],
                        help='Distribution to use for the simulation')
parser.add_argument('--no-graph', required=False,
                        help='Suppress the graphical output',
dest='graph', action='store_false')
parser.add_argument('--graph', required=False,
                        help='Enable the graphical output (use with [-o])',
dest='graph', action='store_true')
parser.add_argument('-p', '--probability', required=False, type=float,
                        help='Probability for the hyperexponential distribution')
parser.set_defaults(graph=True)
args = parser.parse_args()
    # msg = 'Distribution {3} with {0} stages (lambda={1}) over {2} runs'
# print msg.format(args.stages, args.lambdap, args.runs, args.dist)
fig, ax = plt.subplots(1, 1)
if args.dist in 'erlang':
if args.stages <= 0:
            print 'Error: a valid number of stages is required'
sys.exit(1)
lambdap = args.lambdap[0]
mean, var, skew, kurt = erlang.stats(args.stages, scale=lambdap,
moments='mvsk')
x = np.linspace(erlang.ppf(0.00001, args.stages, scale=lambdap),
erlang.ppf(0.99999, args.stages, scale=lambdap),
num=1000)
rv = erlang(args.stages, scale=lambdap)
ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='Erlang PDF')
# Generate random numbers with this distribution
r = erlang.rvs(args.stages, scale=lambdap, size=args.runs)
ax.hist(r, bins=20, normed=True, histtype='stepfilled', alpha=0.4,
label='Experimental values')
meanexp = np.mean(r)
varexp = np.var(r)
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp,
                                                                   mean)
print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp)/meanexp,
np.sqrt(var)/mean)
elif args.dist in 'expon':
lambdap = args.lambdap[0]
mean, var, skew, kurt = expon.stats(scale=lambdap, moments='mvsk')
x = np.linspace(expon.ppf(0.00001, scale=lambdap),
expon.ppf(0.99999, scale=lambdap),
num=1000)
rv = expon(scale=lambdap)
ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='Exponential PDF')
# Generate random numbers with this distribution
r = expon.rvs(scale=lambdap, size=args.runs)
ax.hist(r, bins=20, normed=True, histtype='stepfilled', alpha=0.4,
label='Experimental values')
meanexp = np.mean(r)
varexp = np.var(r)
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp,
                                                                   mean)
print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp)/meanexp,
np.sqrt(var)/mean)
elif args.dist in 'hyperexp':
rv = hyperexp(args.probability, args.lambdap[0], args.lambdap[1])
x = np.linspace(0.00000001, 10.99999, num=1000)
ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='HyperExp PDF')
# ax.plot(x, rv.cdf(x), 'b-', lw=2, alpha=0.6, label='HyperExp CDF')
r = rv.rvs(size=args.runs)
ax.hist(r, normed=True, bins=100, range=(0, 11),
histtype='stepfilled', alpha=0.4, label='Experimental values')
meanexp = np.mean(r)
varexp = np.var(r)
mean = rv.mean()
var = rv.standard_dev()**2
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp,
                                                                   mean)
print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp)/meanexp,
rv.CoV())
if args.graph:
ax.legend(loc='best', frameon=False)
plt.show()
if __name__ == '__main__':
main()
| gpl-2.0 |
macks22/scikit-learn | sklearn/utils/testing.py | 84 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the bacward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
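# A usage sketch for assert_warns (kept in comments so module behaviour is
# unchanged); `deprecated_add` is a hypothetical function used only here:
#
#     >>> import warnings
#     >>> def deprecated_add(a, b):
#     ...     warnings.warn("use operator.add instead", UserWarning)
#     ...     return a + b
#     >>> assert_warns(UserWarning, deprecated_add, 1, 2)
#     3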
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Check the messages of all warnings that belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
The expected exception class, or a tuple of exception classes.
func : callable
Callable object expected to raise the error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
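# A usage sketch for assert_raise_message (comments only); `broken` is a
# hypothetical callable used only for illustration:
#
#     >>> def broken(x):
#     ...     raise ValueError("x must be positive, got %r" % x)
#     >>> assert_raise_message(ValueError, "must be positive", broken, -1)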
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
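# A usage sketch for fake_mldata (comments only); the column names and the
# BytesIO target are illustrative assumptions:
#
#     >>> from io import BytesIO
#     >>> import numpy as np
#     >>> buf = BytesIO()
#     >>> fake_mldata({'data': np.arange(6).reshape(2, 3),
#     ...              'label': np.arange(2)},
#     ...             'some-fake-dataset', buf, ordering=['label', 'data'])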
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
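# A usage sketch for all_estimators (comments only):
#
#     >>> classifiers = all_estimators(type_filter='classifier')
#     >>> names = [name for name, cls in classifiers]
#     >>> 'RandomForestClassifier' in names
#     True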
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copied from joblib.pool (for independence)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
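# A usage sketch for TempMemmap (comments only); `X` is a hypothetical array:
#
#     >>> import numpy as np
#     >>> X = np.arange(12.).reshape(3, 4)
#     >>> with TempMemmap(X) as X_mmap:
#     ...     total = X_mmap.sum()  # X_mmap is a read-only memmapped copy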
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_score`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
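# The two loops above add, for example, SCORERS['precision'],
# SCORERS['f1_micro'] and SCORERS['recall_weighted'] alongside the
# explicitly constructed entries.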
| bsd-3-clause |
thriveth/Pychelle | pychelle/show2dspec.py | 1 | 53224 | #!/usr/bin/env python
# encoding: utf-8
import scipy as sp
import numpy as np
import pandas as pd
import astropy.io.fits as pf
from traits.api import Instance, DelegatesTo, List, Str,\
Range, Bool, HasTraits, Button, Enum, Array
from traitsui.api import View, Item, Group, HGroup, VGroup,\
EnumEditor, Spring, LiveButtons, CheckListEditor, UItem
from traitsui.menu import OKButton, CancelButton, RevertButton,\
UndoButton
from enable.component_editor import ComponentEditor
from chaco.api import ArrayPlotData, PlotLabel, Plot, HPlotContainer, \
ImageData, bone, DataRange1D, create_line_plot,\
ScatterInspectorOverlay, ColorBar, LinearMapper, \
color_map_name_dict, OverlayPlotContainer, add_default_axes, \
add_default_grids, ColorMapper
from chaco.tools.api import ScatterInspector, ZoomTool, \
RangeSelection, RangeSelectionOverlay
from spectrum2d import Spectrum2D
from transition import Transition
from helper_functions import load_lines_series, _extract_1d, \
transition_from_existing
from lpbuilder import ProfileEditor
from paired import Paired
def load_2d(filename, objname=None, redshift=False):
""" Convenience function to open open a Spectrum2D instance."""
HDUList = pf.open(filename, ignore_missing_end=True)
data = HDUList[0].data
head = HDUList[0].header
# Load errors, set to 1. everywhere if error spec not present.
if(len(HDUList) > 1):
errs = HDUList[1].data
else:
print "No error spectrum present"
print "Set all errors to 1."
errs = sp.ones_like(data)
if len(data.shape) < 2:
print "This is a one-dimensional spectrum and cannot be opened \
in the 2D-viewer"
datathingie = Spectrum2D(data, errs, head)
if objname is not None:
datathingie.objname = objname
if redshift:
if objname is None:
raise ValueError('Cannot find redshift without object name.')
else:
datathingie.find_redshift()
return datathingie
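# A usage sketch for load_2d (comments only); the file name and object name
# are hypothetical:
#
#     >>> spec = load_2d('spectrum2d.fits', objname='MyGalaxy')
#     >>> spec.data.shape      # (spatial rows, wavelength bins)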
def view_2d(Spectrum, Center=None):
""" Initializes the 2d-view of the given Spectrum2D() object. """
the_view = Show2DSpec(Spectrum=Spectrum)
if Center is not None:
the_view.Center = Center
the_view.configure_traits(view='main')
return the_view
def fit_transition_to_other(view=None, spectrum=None, transition=None,
freeze=['pos', 'sigma'], tie=[], method='leastsq',
verbose=False, rows='all'):
"""Fits a transition to a template transition.
+ Line widths are kept (almost?) constant.
+ Centroids are allowed to vary slightly, but may be tied up to fixed
inter-component distance. The entire line can move, but the components
cannot move relative to each other.
+ Amplitudes are by default allowed to move freely but might be frozen or
tied to each other.
Keyword arguments
-----------------
view : pychelle.Show2DSpec
Current 2D-view object being worked on, and its spectrum.
spectrum : pychelle.Spectrum2D
Spectrum2D object, if no view is created yet. The function will create
a new Show2dSpec object automatically.
transition : str
### NB! Currently, this doesn't actually do anything: ###
String containing the transition name as it appears in the Spectrum
model index. Defaults to the `transition` property of the passed
spectrum.
freeze : list
The subset of the parameters 'ampl', 'pos' or 'sigma' that should be
frozen in the fit.
tie : list
Same as freeze, only the subset that should be allowed to move with
the constraints set in the expressions below.
Either view or spectrum must be given.
Returns: A modified copy of the current model, with the selected transition
fitted to the data according to the rules defined above.
"""
if (view is None) & (spectrum is None):
print 'Either spectrum or view must be given. Aborting.'
return
elif view is None:
view = Show2DSpec(spectrum)
elif spectrum is None:
spectrum = view.Spectrum
else:
print('Redundant information given. Ignoring spectrum and using view.')
spectrum = view.Spectrum
v = view # Less typing!
# Not actually used for anything. Should it be?
if transition is None:
transition = spectrum.transition
if rows == 'all':
rows = [int(s.split('-')[0]) for s in spectrum.model.index.levels[1]]
# Cycle through all rows / row intervals in model index; fit to data.
for s in v.model.index.levels[1]:
nums = s.split('-')
if int(nums[0]) not in rows:
continue
if len(v.model.loc[(spectrum.transition, s)].drop('Contin')) < 1:
continue
v.LineLo = int(nums[0])
v.LineUp = int(nums[1])
print('\n \n Now fitting rows {} using method {}'.format(s, method))
lp = v.prepare_modeling()
# Now do the stuff that the Go button in LPbuilder does (more or less):
lp.create_fit_param_frame()
if verbose:
print('Parameters to fit: \n', lp.tofit)
lp.load_parameters_to_fitter()
# print(lp.params)
exprdict = {}
for i, compo in enumerate(lp.tofit.drop('Contin').index):
for f in freeze:
lp.params[compo+'_{}'.format(f.capitalize())].set(vary=False)
if i == 0:
amp1 = lp.tofit.loc[compo]['Ampl']
wl1 = lp.tofit.loc[compo]['Pos']
sig1 = lp.tofit.loc[compo]['Sigma']
refname = compo
else:
coeff = lp.tofit.loc[compo]['Ampl'] / amp1
posdiff = lp.tofit.loc[compo]['Pos'] - wl1
sigcoef = lp.tofit.loc[compo]['Sigma'] / sig1
ampl_expr = '{}_Ampl * {}'.format(refname, coeff)
pos_expr = '{}_Pos + {}'.format(refname, posdiff)
sig_expr = '{}_Sigma * {}'.format(refname, sigcoef)
if 'ampl' in tie:
exprdict[compo+'_Ampl'] = ampl_expr
if 'pos' in tie:
exprdict[compo+'_Pos'] = pos_expr
if 'sigma' in tie:
exprdict[compo+'_Sigma'] = sig_expr
for key in exprdict.keys():
com = lp.params[key]
com.set(expr=exprdict[key])
print(lp.params)
v.lp = lp
print 'Now fitting rows: {}'.format(s)
v.lp.fit_with_lmfit(method=method, conf='conf')
v.process_results()
v._build_model_plot()
print('Succesfully fitted rows {} using method {}\n \n'.format(s, method))
# Don't alter any data in-place
# transframe = spectrum.model.loc[transition].copy()
outframe = lp.output
return outframe
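# A usage sketch for fit_transition_to_other (comments only), following the
# docstring above; the spectrum object is an illustrative assumption:
#
#     >>> spec = load_2d('spectrum2d.fits', objname='MyGalaxy')
#     >>> fitted = fit_transition_to_other(spectrum=spec,
#     ...                                  freeze=['pos', 'sigma'],
#     ...                                  tie=['ampl'], method='leastsq')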
lines_srs = load_lines_series()
class SetFitRange(HasTraits):
spectrum = Instance(Spectrum2D)
data = DelegatesTo('spectrum')
errs = DelegatesTo('spectrum')
Center = DelegatesTo('spectrum')
wavl = DelegatesTo('spectrum')
fitranges = DelegatesTo('spectrum')
add = Button('Add')
reset = Button('Reset')
def _update_plot(self):
del(self.container.plot_components[2:])
mv = self.markerval
for i in self.fitranges:
ys = sp.array([mv, mv])
xs = sp.asarray(i)
# print xs, type(xs), xs.dtype
plot = create_line_plot((xs, ys), color='orange', width=5)
plot.value_mapper = self.plot.value_mapper
plot.index_mapper = self.plot.index_mapper
self.container.add(plot)
self.container.request_redraw()
# print i
return
def _add_fired(self):
if type(self.plot.active_tool.selection) == tuple:
range_select = self.plot.active_tool
ranges = range_select.selection
self.fitranges.append(ranges)
self._update_plot()
self.plot.active_tool.deselect()
def _reset_fired(self):
del(self.fitranges[:])
del(self.container.plot_components[2:])
# print self.fitranges
xs = sp.array([0, 1])
ys = sp.array([0, 0])
plot = create_line_plot((xs, ys))
# Hack to make sure plot is properly updated:
self.container.add(plot)
self.container.remove(plot)
self.container.request_redraw()
self.plot.active_tool.deselect()
def __init__(self, spectrum):
super(SetFitRange, self).__init__(spectrum=spectrum)
self.rows = self.data.shape[0]
# self.rangexs = sp.array([])
# self.rangeys = sp.array([])
if spectrum.transition != 'None':
try:
has_range = self.spectrum.model.notnull().get_value(
(self.spectrum.transition,
self.spectrum.lines_sel,
'Contin'),
'Fitranges'
)
if not has_range:
raise ValueError
else:
the_range = self.spectrum.model.get_value(
(self.spectrum.transition,
self.spectrum.lines_sel,
'Contin'),
'Fitranges'
)
self.fitranges = the_range
except KeyError:
self.fitranges = []
except ValueError:
self.fitranges = []
data1d, errs1d = _extract_1d(self.data, self.errs, 1, self.rows-1)
data1d, errs1d = _extract_1d(
self.data, self.errs, spectrum.LineLo, spectrum.LineUp
)
plotindex = sp.where(
(self.wavl > self.Center - 50) & (self.wavl < self.Center + 50))
self.markerval = data1d[plotindex].max() * 1.05
container = OverlayPlotContainer(
padding=40, bgcolor="white",
use_backbuffer=True,
border_visible=True,
fill_padding=False
)
self.plot = create_line_plot(
(self.wavl[plotindex], data1d[plotindex]),
color='black',)
add_default_grids(self.plot)
add_default_axes(self.plot)
self.plot.value_range.set_bounds(data1d[plotindex].max() * -.1,
data1d[plotindex].max() * 1.1)
self.plot2 = create_line_plot(
(self.wavl[plotindex], errs1d[plotindex]),
color='green',)
self.plot2.value_mapper = self.plot.value_mapper
self.plot2.index_mapper = self.plot.index_mapper
container.add(self.plot)
container.add(self.plot2)
self.plot.active_tool = RangeSelection(
self.plot,)
self.container = container
self.plot.overlays.append(RangeSelectionOverlay(component=self.plot))
if len(self.fitranges) > 0:
self._update_plot()
view = View(
Item('container', editor=ComponentEditor(), show_label=False),
HGroup(
Item('add', show_label=False),
Item('reset', show_label=False),),
buttons=LiveButtons,
kind='livemodal',
resizable=True,
height=700, width=900,
)
class Show2DSpec(HasTraits):
"""The class that displays a Spectrum2D instance.
A module to view and select regions to fit from the 2D-spectrum.
Selected regions are forwarded to the ProfileEditor class where they can be
modelled and later (in-program or outside?) can be fitted by a fitting
backend, e.g. Sherpa.
It takes as input a 2D numpy array and a PyFITS header object
(other options later?)
"""
Spectrum = Instance(Spectrum2D)
data = DelegatesTo('Spectrum')
errs = DelegatesTo('Spectrum')
header = DelegatesTo('Spectrum')
wavl = DelegatesTo('Spectrum')
wavlmid = DelegatesTo('Spectrum')
wavlmin = DelegatesTo('Spectrum')
wavlmax = DelegatesTo('Spectrum')
LineLo = DelegatesTo('Spectrum')
LineUp = DelegatesTo('Spectrum')
Lines = DelegatesTo('Spectrum') # Dict of linenums in str and flt form
LSF = DelegatesTo('Spectrum')
Center = DelegatesTo('Spectrum')
line_spectra = DelegatesTo('Spectrum')
transit_dict = DelegatesTo('Spectrum')
transit_list = DelegatesTo('Spectrum')
model = DelegatesTo('Spectrum')
transition = DelegatesTo('Spectrum')
fitranges = DelegatesTo('Spectrum')
add_trans = Button(label='New transition')
specplot = Instance(Plot)
fit_this = Button(label='Guess / Fit')
# Whether to show color range editor
ColorRange = Bool(False)
Interact = Bool(True)
line_sel_lock = Bool(False)
show_model_comps = Bool(True)
ShowContin = Button(label='Show/Edit continuity plots')
ShowColran = Button(label='Show/Edit')
ShowFitran = Button(label='Show/Edit')
ColorScale = Enum(['Linear', 'Sqrt', 'Log'])
colormaps = color_map_name_dict
colormaps_name = Enum(sorted(colormaps.keys()))
# For continuity plot window:
set_label = Button(label='Set identifier label')
remove_comp = Button(label='Remove selected')
unselect_all = Button(label='Clear selections')
apply_to_all_transitions = Bool(False)
all_labels = List(editor=CheckListEditor(values=[], cols=1,))
the_label = Str()
def _build_model_plot(self):
''' This helper method builds the model plot and rebuilds it when the
model is changed. First, construct the x and y values:
'''
rowcoords = []
rowthicks = []
# Get row number labels, and get list of the mean of each of them and
# the width of the rows.
for row in self.model.drop('Dummy', level=0).drop('Contin', level=2).\
index.get_level_values(1):
rowcoords.append(sp.float64(row.split('-')).sum() / 2.)
rowthicks.append(sp.float64(row.split('-')).ptp())
rowcoords = sp.array(rowcoords) - 0.5
rowthicks = sp.array(rowthicks)
# Get the identifier tags, map them to integers for colormapping in
# plot (or other mapping as one would like):
id_colors = (self.model.drop('Dummy', level=0).drop('Contin', level=2)
.Identifier.map(ord).values - 97) % 12 + 1
pos_frame = self.model.drop('Dummy', level=0)\
.drop('Contin', level=2)
pos_array = pos_frame.Pos.values + pos_frame['Line center'].values
cont_series = self.model.drop('Dummy', level=0).loc[
self.model.drop('Dummy', level=0).Identifier.isnull()]
# Check if a transition is selected. If yes, then create continuity
# plot data arrays consisting of the selected transition alone;
# otherwise, create an empty plot.
if self.transition in self.model.index.levels[0]:
current_pos_frame = pos_frame.loc[self.transition]
current_pos_array = current_pos_frame.Pos.values\
+ current_pos_frame['Line center'].values
current_cont = cont_series.loc[self.transition]
curr_cont = current_cont.Ampl.loc[
current_cont.Identifier.isnull()
]
curr_id = (pos_frame.loc[self.transition].Identifier.map(ord)
.values - 97) % 12 + 1
current_ys = current_pos_frame.index.droplevel(1).map(
lambda x: x.split('-')[0]).astype(float) - .5
amp_array = self.model.loc[self.transition]\
.drop('Contin', level=1).Ampl.values
sig_array = self.model.loc[self.transition]\
.drop('Contin', level=1).Sigma.values
cont_y = cont_series.loc[self.transition].index.droplevel(1)\
.map(lambda x: x.split('-')[0]).astype(float) - 0.5
cont_array = curr_cont.values
else:
current_pos_array = np.array([])
sig_array = np.array([])
amp_array = np.array([])
cont_array = np.array([])
cont_y = np.array([])
current_ys = np.array([])
curr_id = np.array([])
curr_cont = np.array([])
# Inject into the class' ArrayPlotData object, so it's available for
# all Plot instances that read this.
self.plotdata.set_data('model_y', rowcoords)
self.plotdata.set_data('model_x', pos_array)
self.plotdata.set_data('model_w', rowthicks)
self.plotdata.set_data('model_colors', id_colors)
self.plotdata.set_data('model_amp', amp_array)
self.plotdata.set_data('model_sig', sig_array)
self.plotdata.set_data('contin_amp', cont_array)
self.plotdata.set_data('contin_y', cont_y)
self.plotdata.set_data('curr_pos', current_pos_array)
self.plotdata.set_data('curr_y', current_ys)
self.plotdata.set_data('curr_id', curr_id)
# Update ranges for the continuity plots:
# First, check if a transition is selected. Otherwise, create empty
# plots with generic [0, 1] ranges.
Posrange = np.array([[current_pos_array.min(), pos_array.max()]
if len(current_pos_array) > 1
else [0., 1.]][0])
Amprange = np.array([[amp_array.min(), amp_array.max()]
if len(amp_array) > 1
else [0., 1.]][0])
Sigrange = np.array([[sig_array.min(), sig_array.max()]
if len(sig_array) > 1
else [0., 1.]][0])
# Now, set the ranges.
self.Posplot.index_range.set_bounds(
Posrange.min()
- Posrange.ptp() * .1,
Posrange.max()
+ Posrange.ptp() * .1
)
self.Ampplot.index_range.set_bounds(
Amprange.min()
- Amprange.ptp() * .1,
Amprange.max()
+ Amprange.ptp() * .1
)
self.Sigplot.index_range.set_bounds(
Sigrange.min()
- Sigrange.ptp() * .1,
Sigrange.max()
+ Sigrange.ptp() * .1)
return
def _transition_default(self):
return ''
def __init__(self, Spectrum, center=None):
""" Non-passive object needs contructor to know and manipulate its own
Traits. """
# Make sure this constructor function can access parent spec Traits.
super(Show2DSpec, self).__init__(Spectrum=Spectrum)
# Use the newly accessed self-awareness to construct new Traits.
self.transit_list.extend(self.transit_dict.keys())
self.all_labels = self.model.drop('Dummy', level=0)\
.drop('Contin', level=2)['Identifier'].unique().tolist()
# print self.all_labels
self.add_trait(
'val_max',
Range(
-0., float(self.data[30:-30, :].max()),
float(self.data[30:-30, 1000:2000].max())))
self.add_trait(
'val_min',
Range(0., float(self.data[30:-30, :].max()), 0.))
# Create arrays for plotting and minor helper-arrays, define misc.
# values etc.
wyes = sp.arange(self.data.shape[0])
# Data for image plot, initial.
self.imgdata = ImageData(data=self.data[:, :-1], value_depth=1)
# Data for different color scales:
self.lindata = self.data[:, :-1] # , value_depth=1
tempdata = self.data.copy()
tempdata[tempdata < 0] = 0.
self.sqrtdata = np.sqrt(tempdata[:, :-1].copy()) # , value_depth=1
tempdata[tempdata == 0.] = 0.001
self.logdata = np.log10(tempdata[:, :-1].copy())
linexes = sp.array([self.Center, self.Center])
loexes = linexes - 30.
hiexes = linexes + 30.
linwyes = sp.array([wyes.min(), wyes.max()])
indrange = sp.array(
[self.wavlmin, self.wavlmax, self.wavlmax, self.wavlmin])
valrange = sp.array(
[self.LineUp, self.LineUp, self.LineLo - 1, self.LineLo - 1])
model_x = sp.array([0.])
model_y = sp.array([0.])
model_w = sp.array([0.])
model_amp = sp.array([0.])
contin_amp = sp.array([0.])
contin_y = sp.array([0.])
curr_y = sp.array([0.])
model_sig = sp.array([0.])
dummy = sp.array([0.])
model_colors = sp.array([0.])
curr_pos = sp.array([0.])
curr_id = sp.array([0.])
# ==============================================================
# Define ArrayPlotData object and the plot itself.
# Plot data object:
self.plotdata = ArrayPlotData(
dummy=dummy,
imagedata=self.imgdata,
exes=linexes,
loex=loexes,
hiex=hiexes,
wyes=linwyes,
polexes=indrange,
polwyes=valrange - .0,
model_x=model_x,
model_y=model_y,
model_w=model_w,
curr_pos=curr_pos,
curr_id=curr_id,
curr_y=curr_y,
model_amp=model_amp,
contin_amp=contin_amp,
contin_y=contin_y,
model_sig=model_sig,
model_colors=model_colors
)
# Plot object containing all the rest.
myplot = Plot(self.plotdata)
# Define image plot of the 2D data
self.my_plot = myplot.img_plot(
"imagedata",
name='Image',
colormap=bone, # gist_heat,
xbounds=self.wavl[:] # -1],
)[0]
# Normalise the colorbar to the value range of the data.
self.my_plot.value_range.set_bounds(self.val_min, self.val_max)
# Define the rectangle overlay showing which lines are selected.
self.selected = myplot.plot(
('polexes', 'polwyes'),
name='Selected rows',
type='polygon',
face_color=(0.5, 0.5, 0.9) + (0.3,),
edge_width=0.3,
edge_color=(1., 1., 1.) + (1.,),
edge_alpha=1.,
)[0]
# Once defined, add these to the plot object.
myplot.add(self.my_plot)
myplot.add(self.selected)
# Marker for the center line
center_marker = myplot.plot(
('exes', 'wyes'),
type='line',
color='green', line_width=1.5, alpha=1.)
# Lower and upper limits for the 1D plot shown with markers.
lower_marker = myplot.plot(
('loex', 'wyes'),
type='line',
color='yellow',
line_width=.3)
higher_marker = myplot.plot(
('hiex', 'wyes'), type='line',
color='yellow', line_width=.3)
# Model plot.
# For some reason, all elements of a Plot() instance by default share
# the same mappers. This has the slightly bizarre consequence that we
# have to assign the model plot the wrong colormapper (which will then
# be valid globally, i.e. for the image plot), and then after the fact
# assign a different color_mapper to the actual model plot.
# Yes, seriously.
self.centroids_plot = myplot.plot(
('model_x', 'model_y', 'model_colors'),
type='cmap_scatter',
vmin=97,
vmax=123,
color_mapper=bone, # gist_heat,
name='centroids',
marker_size=2.5,
outline_color='transparent',
)
# Set fixed color range based on ColorBrewer 'Paired' sequence
paired_mapper = ColorMapper.from_palette_array(
[Paired[x] for x in sorted(Paired.keys())][:12],
# Paired.values()[:12],
range=DataRange1D(low=1, high=12),
steps=12
)
self.paired_mapper = paired_mapper
self.centroids_plot[0].color_mapper = paired_mapper # (
# =====================================================================
# And now: the parameter-space continuity plot.
ContCont = HPlotContainer(
use_backbuffer=True,
resizable='hv',
bgcolor='transparent',
spacing=-50,
padding_top=20,
)
Posplot = Plot(self.plotdata)
Sigplot = Plot(self.plotdata)
Ampplot = Plot(self.plotdata)
# posplot = Posplot.plot( # Same as 'centroids'! Different container.
# ('model_x', 'model_y', 'model_colors'),
# type='cmap_scatter',
# color_mapper=paired_mapper, # gist_rainbow,
# marker_size=4,
# outline_color='gray',
# name='Positions',
# bgcolor='whitesmoke',
# # bgcolor='lavender',
# )
posplot = Posplot.plot( # Same as 'centroids'! Different container.
('curr_pos', 'curr_y', 'curr_id'),
type='cmap_scatter',
color_mapper=paired_mapper, # gist_rainbow,
marker_size=4,
outline_color='gray',
name='Positions',
bgcolor='whitesmoke',
# bgcolor='lavender',
)
ampplot = Ampplot.plot(
('model_amp', 'curr_y', 'curr_id'),
type='cmap_scatter',
color_mapper=paired_mapper, # gist_rainbow,
marker_size=4,
name='Amplitudes',
bgcolor='cornsilk',
index_scale='log',
# bgcolor='white',
)
contplot = Ampplot.plot(
('contin_amp', 'contin_y'),
type='scatter',
color='black',
name='Continuum',
)
sigplot = Sigplot.plot(
('model_sig', 'curr_y', 'curr_id'),
type='cmap_scatter',
color_mapper=paired_mapper, # gist_rainbow,
marker_size=4,
name='Sigma',
bgcolor='lavender',
# bgcolor='thistle',
# bgcolor='white',
)
Posplot.title = 'Centroid positions'
Posplot.value_axis.title = 'Row #'
Posplot.index_axis.title = 'Wavelength [Å]'
Ampplot.title = 'Amplitudes'
Ampplot.index_axis.title = 'Amplitude [flux]'
Sigplot.title = 'Line widths'
Sigplot.index_axis.title = 'Line width [Å]'
ContCont.add(Posplot)
ContCont.add(Ampplot)
ContCont.add(Sigplot)
ContCont.overlays.append(
PlotLabel(
' '.join(
"Select Points on Centroids plot and assign them a label. \
Zoom by using the mouse wheel or holding Ctrl and \
dragging mouse to mark zoom region. Use ESC to revert \
zoom.".split()),
component=ContCont,
overlay_position='top'))
# Attach some tools to the plot
Posplot.overlays.append(ZoomTool(Posplot))
Sigplot.overlays.append(ZoomTool(Sigplot))
Ampplot.overlays.append(ZoomTool(Ampplot))
# Add ScatterInspector tool and overlay to the Posplot part.
posplot[0].tools.append(ScatterInspector(posplot[0]))
overlay = ScatterInspectorOverlay(
posplot[0],
hover_color="red",
hover_marker_size=5,
selection_marker_size=4,
selection_color="transparent",
selection_outline_color="white",
selection_line_width=1.5)
posplot[0].overlays.append(overlay)
Posplot.value_range.set_bounds(wyes.min(), wyes.max())
Posplot.index_range.set_bounds(model_x.min() * .9,
model_x.max() * 1.1)
Ampplot.value_range = Posplot.value_range
Ampplot.index_range.set_bounds(model_amp.min() * .9,
model_amp.max() * 1.1)
Sigplot.value_range = Posplot.value_range
Sigplot.index_range.set_bounds(model_sig.min() * .9,
model_sig.max() * 1.1)
self.Posplot = Posplot
self.posplot = posplot
self.Ampplot = Ampplot
self.Sigplot = Sigplot
self.ContCont = ContCont
# Create the colorbar, handing in the appropriate range and colormap
colormap = self.my_plot.color_mapper
colorbar = ColorBar(
index_mapper=LinearMapper(range=colormap.range),
color_mapper=colormap,
plot=self.my_plot,
orientation='v',
resizable='v',
width=25,
padding=20)
colorbar.padding_top = myplot.padding_top
colorbar.padding_bottom = myplot.padding_bottom
container = HPlotContainer(use_backbuffer=True)
container.add(myplot)
container.add(colorbar)
container.bgcolor = "sys_window"
self.container = container
self.specplot = myplot
# If a center is given in the call, set this.
if center is not None:
self.Center = center
# Set the wavelength range to show.
self.specplot.index_range.low_setting, \
self.specplot.index_range.high_setting\
= (self.Center - 55, self.Center + 55)
self.wyes = wyes # For debugging
if len(self.model) > 1:
self._build_model_plot()
# END __init__()
# =====================================================================
# =========================================================================
# Handlers for change of traits.
# =========================================================================
# Update the edges of the overlay rectangle when the chosen line numbers
# are changed.
def _LineLo_changed(self):
self.plotdata.set_data(
'polwyes',
sp.array([self.LineUp, self.LineUp,
self.LineLo - 1, self.LineLo - 1]) + .0)
try:
fitrange = self.model\
.loc[self.transition]\
.loc['{}-{}'.format(self.LineLo, self.LineUp)]\
.loc['Contin']['Fitranges']
if np.isnan(fitrange).any():
self.fitranges = []
else:
self.fitranges = fitrange
except KeyError:
self.fitranges = []
def _LineUp_changed(self):
self.plotdata.set_data(
'polwyes',
sp.array([self.LineUp, self.LineUp,
self.LineLo - 1, self.LineLo - 1]) + .0)
# When Center is changed, move the image accordingly, but make sure the
# center and wing markers are still in the middle of the plot.
def _Center_changed(self):
if self.transition != 'None' and self.line_sel_lock is False:
self.transition = 'None'
self.specplot.index_range.low_setting, \
self.specplot.index_range.high_setting\
= (self.Center - 55, self.Center + 55)
self.plotdata.set_data('exes', sp.array([self.Center, self.Center]))
self.plotdata.set_data(
'loex', sp.array([self.Center, self.Center]) - 30.)
self.plotdata.set_data(
'hiex', sp.array([self.Center, self.Center]) + 30.)
self.my_plot.request_redraw()
def _transition_changed(self):
"""Change the Center parameter to that of the selected transition."""
if self.transition == 'None':
pass
else:
print 'New transition selected: ', self.transition
transwl = self.transition.split('(')[1][:-1]
# Make sure the selected line is *not* changed to 'None' when we
# jump to the center of the newly selected line:
self.line_sel_lock = True
self.Center = float(transwl)
self.line_sel_lock = False
self.transwl = float(transwl)
self._build_model_plot()
# Mostly to deal with a manually set model in the Spectrum2D Instance:
def _transit_list_changed(self):
self._build_model_plot()
# Update color scale when requested from GUI.
def _val_min_changed(self):
if self.val_min > self.val_max:
self.val_max = self.val_min
self.my_plot.value_range.set_bounds(self.val_min, self.val_max)
self.my_plot.request_redraw()
self.specplot.request_redraw()
def _val_max_changed(self):
if self.val_min > self.val_max:
self.val_min = self.val_max
self.my_plot.value_range.set_bounds(self.val_min, self.val_max)
self.specplot.request_redraw()
def _show_model_comps_changed(self):
clr_range = self.my_plot.color_mapper.range
if self.show_model_comps is True:
self.specplot.showplot('centroids')
if self.show_model_comps is False:
self.specplot.hideplot('centroids')
# Ugly hack to make sure plot updates:
self.plotdata.set_data('model_y', self.plotdata['model_y'] + .5)
self.plotdata.set_data('model_y', self.plotdata['model_y'] - .5)
# </ugly hack>
self.my_plot.request_redraw()
self.container.request_redraw()
self.specplot.request_redraw()
# =========================================================================
# Handler for fired button and possibly other events added in future
def _add_trans_fired(self):
transition = Transition(spectrum=self.Spectrum)
if transition.Succes:
foo = transition.configure_traits(view='Choose')
transname = transition.choices
# print transname
# print ' '.join(transname.split()[:-1])
# import pdb; pdb.set_trace() # XXX BREAKPOINT
transwl = lines_srs.loc[' '.join(transname.split()[:-1])]\
['Lambda_0'] * (1 + transition.z)
# print 'transwl: ', transwl
if foo:
print "This is Show2DSpec: transition added '" \
+ transname + "'"
self.transition = transname # *After* setting transit_dict.
# We don't want duplicate entries in the transition list:
if transname not in self.transit_list:
self.transit_list.append(transname)
# print 'Transition: ', self.transition
# If from_existing is selected, transfer and transform existing
# transition to new.
if transition.from_existing:
oldtrans = transition.select_existing
oldwl = self.model['Line center'].dropna().loc[oldtrans][0]
LSF_old = np.interp(oldwl, self.wavl, self.LSF)
LSF_new = np.interp(transwl, self.wavl, self.LSF)
self.model = transition_from_existing(
self.model, oldtrans, transname, transwl,
LSF=[LSF_old, LSF_new]
)
if transition.fit_now:
try:
fit_transition_to_other(self)
except:
print 'Quick fit did not succeed.'
raise
self._build_model_plot()
else:
print 'Cancelled, no new transition added.'
else:
print 'Something wrong when adding transition! \n'
return
# =========================================================================
# Functionality of the "Fit this" button, chopped up in one function per
# logical step, to allow more flexible scripting and doing nonstandard
# things that are not easily integrated into the GUI.
def get_fitdata(self):
data1d, errs1d = _extract_1d(
self.data, self.errs, self.LineLo, self.LineUp)
return data1d, errs1d
def define_fitranges(self):
if self.transition == 'None':
transname = 'Center: ' + str(self.Center)
self.transition = transname
else:
transname = self.transition
Lines = [self.LineLo - 1, self.LineUp]
linesstring = str(Lines[0] + 1) + '-' + str(Lines[1])
if (transname, linesstring, 'Contin') in self.model.index:
fitranges = self.model.get_value(
(transname, linesstring, 'Contin'), 'Fitranges')
# print 'Fitranges Show2dspec: ', fitranges
if type(fitranges) == float:
fitranges = [(self.Center - 30., self.Center + 30)]
elif len(self.fitranges) > 0:
# print 'self.fitranges was longer than 0 but fitranges not in model'
fitranges = self.fitranges
else:
fitranges = [(self.Center - 30., self.Center + 30)]
return transname, linesstring, fitranges
def prepare_modeling(self):
transname, linesstring, fitranges = self.define_fitranges()
data1d, errs1d = self.get_fitdata()
lp = ProfileEditor(
self.wavl, data1d, errs1d, self.Center, fitrange=fitranges
)
lp.transname = transname
lp.linesstring = linesstring
if len(self.fitranges) == 0:
self.fitranges = [(self.wavlmin, self.wavlmax)]
print 'Components before: ', lp.Components
# Inject existing model into the LineProfile object if there is one.
if ((transname in self.model.index.levels[0]) and
(linesstring in self.model.index.levels[1])):
to_insert = self.model.loc[transname].loc[linesstring]
# Remove the automatically created components in lp. model.
print to_insert.index
if 'Comp' not in to_insert.index:
pass
if 'Comp1' not in to_insert.index:
if len(to_insert.index) > 1:
lp.Components.pop('Comp1')
lp.CompoList = []
# Make sure all columns are there
for S in ['Pos_stddev', 'Sigma_stddev', 'Ampl_stddev']:
if S not in to_insert.columns:
to_insert[S] = np.nan
# Insert model into LPbuilder model.
for i in to_insert.index:
if len(to_insert.index) == 1:
continue
if i == 'Contin':
lp.Components[i][0] = to_insert.xs(i)['Ampl']
else:
lp.Components[i] = list(
to_insert[
['Pos', 'Sigma', 'Ampl', 'Identifier',
'Pos_stddev', 'Sigma_stddev', 'Ampl_stddev']]
.xs(i).values
)
print 'Components after: ', lp.Components
print lp.CompoList
lp.import_model()
return lp
def process_results(self):
""" Inserts fit results from LPbuilder into model of Spectrum2D.
NOTE
----
This method makes in-place changes to the model.
"""
# Since there may be more components in the old model than the new,
# we cannot overwrite component-wise, so we remove entire submodel
# for the current transname-linesstring combo and rewrite it.
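        # E.g. all rows indexed (transname, linesstring, <component>) are
        # dropped in one go here, then re-inserted component by component below.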
self.model.sortlevel(0)
transname, linesstring = self.lp.transname, self.lp.linesstring
if ((transname in self.model.index.levels[0].values.sum()) and
(linesstring in self.model.index.levels[1].values.sum())):
self.model = self.model\
.unstack()\
.drop((transname, linesstring))\
.stack()
for thekey in self.lp.Components.keys():
if not thekey == 'Contin':
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Ampl',
self.lp.Components[thekey][2]
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Ampl_stddev',
self.lp.Components[thekey][6]
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Pos',
self.lp.Components[thekey][0]
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Pos_stddev',
self.lp.Components[thekey][4]
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Sigma',
self.lp.Components[thekey][1]
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Sigma_stddev',
self.lp.Components[thekey][5]
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Identifier',
self.lp.Components[thekey][3]
)
# Keep track of line center position for each transition:
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Line center',
self.Center
)
else:
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Ampl',
self.lp.Components[thekey][0]
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Ampl_stddev',
self.lp.Components[thekey][1]
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'Fitranges',
self.lp.rangelist
)
try:
self.model = self.model.set_value(
(transname, linesstring, thekey),
'RedChi2',
self.lp.result.redchi
)
except:
print ('No fit performed for lines {}, RedChi set to NaN'
.format(self.Spectrum.lines_sel)
)
self.model = self.model.set_value(
(transname, linesstring, thekey),
'RedChi2', np.nan
)
self.model = self.model.sort()
# return
def _fit_this_fired(self):
# Extract rows to 1D spectrum, send this to
# ProfileEditor class:
print ' '
print 'Now modelling selected rows and transition:'
# Is this step really necessary?
transname, linesstring, fitranges = self.define_fitranges()
print 'Transition to be modelled:', transname
print 'Rows selected: {0} to {1}'.format(self.LineLo, self.LineUp)
self.lp = self.prepare_modeling()
# Now, ready to rock.
new_model = self.lp.configure_traits(view='view')
print 'Line Profile return: ', new_model # .result
# When done creating the guessed or fitted model,
# insert it into Pandas DataFrame.
if new_model: # .result:
self.process_results()
self._build_model_plot()
self.Posplot.request_redraw()
return
# End of "Fit this" button functionality.
# =========================================================================
# =========================================================================
    # The different window layouts. So far the main window, the color range
# editor, the continuity plot window and the ID label assigning window.
# =========================================================================
# Main window.
main = View(
Item(
'container',
editor=ComponentEditor(),
resizable=True,
show_label=False,
width=1400
),
Group(
Group(
Item('Center', show_label=False, springy=True),
springy=True,
show_border=True,
label='Center'
),
HGroup(
Item('LineLo', style='custom', label='lower', springy=True),
Item('LineUp', style='custom', label='Upper', springy=True),
label='Selected rows',
show_border=True,
springy=True
),
HGroup(
Item('show_model_comps', label='Show'),
label='Model',
show_border=True
),
orientation='horizontal'
),
HGroup(
HGroup(
Item('ShowContin', show_label=False),
label='Model parameter inspector',
show_border=True,
),
HGroup(
Item('ShowColran', show_label=False),
label='Color Range',
show_border=True,
),
HGroup(
Item('ShowFitran', show_label=False),
label='Fit wavelength range',
show_border=True,
),
HGroup(
Item('transition',
editor=EnumEditor(name='transit_list'),
label='Choose'
),
Item('add_trans', show_label=False),
label='Transition:',
show_border=True,
),
Spring(),
Item('fit_this', show_label=False),
springy=True,
),
buttons=[OKButton, CancelButton],
resizable=True,
title='Pychelle - 2D viewer & selector')
# The color range editor window
ColranView = View(
Group(
VGroup(
Item('val_min', label='Min:', springy=True),
Item('val_max', label='Max:', springy=True),
label='Cut levels',
show_border=True,
),
HGroup(
Item('ColorScale', label='Color scale'), # style='custom'),
Item('colormaps_name', label='Colormap'),
label='Scale and colors',
show_border=True,
),
),
title='Edit plot look & feel',
buttons=[UndoButton, RevertButton, CancelButton, OKButton]
)
# The UI window showing the continuity plots and calling the Identifier
# label assigning window below.
ContinView = View(
VGroup(
Group(
Item(
'ContCont',
editor=ComponentEditor(),
show_label=False,
),
'|',
UItem(
'all_labels',
show_label=False,
style='readonly',
label='Show/hide components'
)
),
HGroup(
Item('unselect_all', show_label=False),
Spring(),
Item('apply_to_all_transitions'),
Item('set_label', show_label=False),
Item('remove_comp', show_label=False),
springy=True,
),
),
resizable=True,
width=1200.,
height=600.,
buttons=[UndoButton, RevertButton, CancelButton, OKButton],
kind='live',
)
# Interface to set identifier labels.
ReassignView = View(
HGroup(
Item(
'all_labels', style='readonly',
label='Existing',
show_label=False
),
VGroup(
Item('the_label', label='Set identifier label',
style='custom'),
)
),
buttons=[CancelButton, OKButton],
close_result=True,
kind='livemodal'
)
    AreYouSureString = 'Pressing OK will permanently delete \n' +\
'the selected components'
AreYouSureView = View(
Item('AreYouSureString', style='readonly'),
buttons=['OK', 'Cancel']
)
def _ShowColran_fired(self):
self.edit_traits(view='ColranView')
def _ColorScale_default(self):
return 'Linear'
def _ColorScale_changed(self):
if self.ColorScale == 'Linear':
self.plotdata.set_data('imagedata', self.lindata)
self.val_max = float(self.lindata[30:-30, 1000:2000].max())
self.specplot.request_redraw()
if self.ColorScale == 'Sqrt':
self.plotdata.set_data('imagedata', self.sqrtdata)
self.val_min = 0.
self.val_max = float(self.sqrtdata[30:-30, 1000:2000].max())
self.specplot.request_redraw()
if self.ColorScale == 'Log':
self.plotdata.set_data('imagedata', self.logdata)
self.val_max = float(self.logdata[30:-30, 1000:2000].max())
self.val_min = 0.
self.specplot.request_redraw()
return
def _colormaps_name_default(self):
return 'gray'
def _colormaps_name_changed(self):
print("Selected colormap: {}".format(self.colormaps_name))
clr_range = self.my_plot.color_mapper.range
self.my_plot.color_mapper = \
color_map_name_dict[self.colormaps_name](clr_range)
def _ShowContin_fired(self):
self.model = self.model.sort_index()
self._build_model_plot()
self.edit_traits(view='ContinView')
def _ShowFitran_fired(self):
A = SetFitRange(self.Spectrum)
A.edit_traits()
theidx = (self.Spectrum.transition, self.Spectrum.lines_sel, 'Contin')
# print self.fitranges
if A: # and (theidx[0] in self.model.index.levels[0]):
if not theidx in self.model.index:
self.model.set_value(theidx, 'Ampl', 0.0)
# if len(self.model.loc[self.Spectrum.transition].loc[self.Spectrum.lines_sel]) == 1:
# self.model.loc[
# (self.Spectrum.transition, self.Spectrum.lines_sel, 'Comp1'),
# ['Pos', 'Sigma', 'Ampl', 'Identifier']
# ] = [0, 0, 0, 'a']
self.model.set_value(theidx, 'Fitranges', self.fitranges)
def _unselect_all_fired(self):
"""Clears all selections."""
self.posplot[0].index.metadata['selections'] = []
self.posplot[0].value.metadata['selections'] = []
def _set_label_fired(self):
self.model = self.model.sort_index()
self._build_model_plot()
y_mask = self.posplot[0].value.metadata.get('selections', [])
x_mask = self.posplot[0].index.metadata.get('selections', [])
self.all_labels = self.model.drop('Dummy', level=0)\
.drop('Contin', level=2)['Identifier'].unique().tolist()
do_it = self.edit_traits(view='ReassignView')
print('Label to set: {0}'.format(self.the_label))
# print do_it
if self.apply_to_all_transitions is True:
transits = self.Spectrum.model.index.levels[0].tolist()
else:
transits = [self.transition]
model = self.model.copy()#\
# .set_index('Identifier', append=True, drop=False)\
# .reset_index('Component', drop=False)
the_index = model.loc[self.transition]\
.drop('Contin', level=1).index[x_mask]
for t in transits:
for ind in the_index:
model = model.set_value(
(t, ind[0], ind[1]), 'Identifier', self.the_label
)
# TODO: I think this is the source for my problems. Why change index
# here??
self.model = model#.set_index('Component', append=True, drop=True)\
# .reset_index('Identifier', drop=True)
self._build_model_plot()
self.ContCont.request_redraw()
self.Posplot.request_redraw()
self.apply_to_all_transitions = False
def _remove_comp_fired(self):
if self.apply_to_all_transitions is True:
transits = self.Spectrum.model.index.levels[0].tolist()
else:
transits = [self.transition]
y_mask = self.posplot[0].value.metadata.get('selections', [])
x_mask = self.posplot[0].index.metadata.get('selections', [])
model = self.model.copy()\
.set_index('Identifier', append=True, drop=True)\
.reset_index('Component', drop=False)
the_index = model.loc[self.transition]\
.drop('Contin', level=1).index[x_mask]
for t in transits:
for ind in the_index:
model.drop((t, ind[0], ind[1]), inplace=True)
self.model = model.set_index('Component', append=True)\
.reset_index('Identifier')
self._unselect_all_fired()
self._build_model_plot()
self.ContCont.request_redraw()
self.Posplot.request_redraw()
self.apply_to_all_transitions = False
| gpl-3.0 |
oemof/examples | oemof_examples/oemof.solph/v0.3.x/generic_chp/bpt.py | 2 | 3093 | # -*- coding: utf-8 -*-
"""
General description
-------------------
Example that illustrates how the custom component `GenericCHP` can be used.
In this case it is used to model a back pressure turbine.
Installation requirements
-------------------------
This example requires oemof v0.3.x. Install it with:
pip install 'oemof>=0.3,<0.4'
"""
__copyright__ = "oemof developer group"
__license__ = "GPLv3"
import os
import pandas as pd
import oemof.solph as solph
from oemof.network import Node
from oemof.outputlib import processing, views
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
# read sequence data
full_filename = os.path.join(os.path.dirname(__file__),
'generic_chp.csv')
data = pd.read_csv(full_filename, sep=",")
# select periods
periods = len(data)-1
# create an energy system
idx = pd.date_range('1/1/2017', periods=periods, freq='H')
es = solph.EnergySystem(timeindex=idx)
Node.registry = es
# resources
bgas = solph.Bus(label='bgas')
rgas = solph.Source(label='rgas', outputs={bgas: solph.Flow()})
# heat
bth = solph.Bus(label='bth')
# dummy source at high costs that serves the residual load
source_th = solph.Source(label='source_th',
outputs={bth: solph.Flow(variable_costs=1000)})
demand_th = solph.Sink(label='demand_th', inputs={bth: solph.Flow(fixed=True,
actual_value=data['demand_th'], nominal_value=200)})
# power
bel = solph.Bus(label='bel')
demand_el = solph.Sink(label='demand_el', inputs={bel: solph.Flow(
variable_costs=data['price_el'])})
# back pressure turbine with same parameters as btp
# (for back pressure characteristics Q_CW_min=0 and back_pressure=True)
bpt = solph.components.GenericCHP(
label='back_pressure_turbine',
fuel_input={bgas: solph.Flow(
H_L_FG_share_max=[0.19 for p in range(0, periods)])},
electrical_output={bel: solph.Flow(
P_max_woDH=[200 for p in range(0, periods)],
P_min_woDH=[80 for p in range(0, periods)],
Eta_el_max_woDH=[0.53 for p in range(0, periods)],
Eta_el_min_woDH=[0.43 for p in range(0, periods)])},
heat_output={bth: solph.Flow(
Q_CW_min=[0 for p in range(0, periods)])},
Beta=[0. for p in range(0, periods)],
back_pressure=True)
# create an optimization problem and solve it
om = solph.Model(es)
# debugging
# om.write('generic_chp.lp', io_options={'symbolic_solver_labels': True})
# solve model
om.solve(solver='cbc', solve_kwargs={'tee': True})
# create result object
results = processing.results(om)
# plot data
if plt is not None:
# plot PQ diagram from component results
data = results[(bpt, None)]['sequences']
ax = data.plot(kind='scatter', x='Q', y='P', grid=True)
ax.set_xlabel('Q (MW)')
ax.set_ylabel('P (MW)')
plt.show()
# plot thermal bus
data = views.node(results, 'bth')['sequences']
ax = data.plot(kind='line', drawstyle='steps-post', grid=True)
ax.set_xlabel('Time (h)')
ax.set_ylabel('Q (MW)')
plt.show()
| gpl-3.0 |
beni55/dipy | doc/examples/brain_extraction_dwi.py | 13 | 3020 | """
===================================
Brain segmentation with median_otsu
===================================
We show how to extract brain information and mask from a b0 image using dipy's
segment.mask module.
First import the necessary modules:
"""
import numpy as np
import nibabel as nib
"""
Download and read the data for this tutorial.
The scil_b0 dataset contains data acquired on scanners from different companies
and models. For this example, the data comes from a 1.5 tesla Siemens MRI.
"""
from dipy.data.fetcher import fetch_scil_b0, read_siemens_scil_b0
fetch_scil_b0()
img = read_siemens_scil_b0()
data = np.squeeze(img.get_data())
"""
``img`` contains a nibabel Nifti1Image object. Data is the actual brain data as
a numpy ndarray.
Segment the brain using dipy's mask module.
``median_otsu`` returns the segmented brain data and a binary mask of the brain.
It is possible to fine tune the parameters of ``median_otsu`` (``median_radius``
and ``num_pass``) if extraction yields incorrect results but the default
parameters work well on most volumes. For this example, we used 2 as
``median_radius`` and 1 as ``num_pass``
"""
from dipy.segment.mask import median_otsu
b0_mask, mask = median_otsu(data, 2, 1)
"""
Saving the segmentation results is very easy using nibabel. We need the b0_mask
and the binary mask volumes. The affine matrix, which transforms the image's
coordinates to world coordinates, is also needed. Here, we choose to save
both images in float32.
"""
mask_img = nib.Nifti1Image(mask.astype(np.float32), img.get_affine())
b0_img = nib.Nifti1Image(b0_mask.astype(np.float32), img.get_affine())
fname = 'se_1.5t'
nib.save(mask_img, fname + '_binary_mask.nii.gz')
nib.save(b0_img, fname + '_mask.nii.gz')
"""
Quick view of the middle slice of the results using matplotlib.
"""
import matplotlib.pyplot as plt
from dipy.core.histeq import histeq
sli = data.shape[2] / 2
plt.figure('Brain segmentation')
plt.subplot(1, 2, 1).set_axis_off()
plt.imshow(histeq(data[:, :, sli].astype('float')).T,
cmap='gray', origin='lower')
plt.subplot(1, 2, 2).set_axis_off()
plt.imshow(histeq(b0_mask[:, :, sli].astype('float')).T,
cmap='gray', origin='lower')
plt.savefig('median_otsu.png')
"""
.. figure:: median_otsu.png
:align: center
**An application of median_otsu for brain segmentation**.
``median_otsu`` can also automatically crop the outputs to remove the largest
possible number of background voxels. This makes the output data significantly
smaller. Auto cropping in ``median_otsu`` is activated by setting the
``autocrop`` parameter to True.
"""
b0_mask_crop, mask_crop = median_otsu(data, 4, 4, autocrop=True)
"""
Saving cropped data using nibabel as demonstrated previously.
"""
mask_img_crop = nib.Nifti1Image(mask_crop.astype(np.float32), img.get_affine())
b0_img_crop = nib.Nifti1Image(
b0_mask_crop.astype(np.float32), img.get_affine())
nib.save(mask_img_crop, fname + '_binary_mask_crop.nii.gz')
nib.save(b0_img_crop, fname + '_mask_crop.nii.gz')
| bsd-3-clause |
466152112/scikit-learn | sklearn/preprocessing/label.py | 35 | 28877 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
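        # Each sample contributes at most one nonzero (its own label), so the
        # cumulative count of in-class samples is exactly the CSR row pointer
        # (indptr) and the searchsorted positions give the column indices.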
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
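        # Using the mapping's current length as the default factory assigns
        # column 0 to the first unseen label, column 1 to the next, and so on
        # (indices in order of first appearance).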
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
peterfpeterson/mantid | qt/python/mantidqt/widgets/workspacedisplay/matrix/io.py | 3 | 1554 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
from mantidqt.widgets.workspacedisplay.matrix.presenter import MatrixWorkspaceDisplay
from mantid.api import AnalysisDataService as ADS # noqa
class MatrixWorkspaceDisplayAttributes(object):
# WARNING: If you delete a tag from here instead of adding a new one, it will make old project files obsolete so
# just add an extra tag to the list e.g. ["InstrumentWidget", "IWidget"]
_tags = ["MatrixWorkspaceDisplayView"]
class MatrixWorkspaceDisplayEncoder(MatrixWorkspaceDisplayAttributes):
def __init__(self):
super(MatrixWorkspaceDisplayEncoder, self).__init__()
@staticmethod
def encode(obj, _=None):
return {"workspace": obj.presenter.model._ws.name()}
@classmethod
def tags(cls):
return cls._tags
class MatrixWorkspaceDisplayDecoder(MatrixWorkspaceDisplayAttributes):
def __init__(self):
super(MatrixWorkspaceDisplayDecoder, self).__init__()
@staticmethod
def decode(obj_dic, _=None):
import matplotlib.pyplot as plt
pres = MatrixWorkspaceDisplay(ADS.retrieve(obj_dic["workspace"]), plot=plt)
return pres.container
@classmethod
def tags(cls):
return cls._tags
| gpl-3.0 |
jaeilepp/mne-python | examples/visualization/make_report.py | 5 | 1576 | """
================================
Make an MNE-Report with a Slider
================================
In this example, MEG evoked data are plotted in an html slider.
"""
# Authors: Teon Brooks <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
from mne.report import Report
from mne.datasets import sample
from mne import read_evokeds
from matplotlib import pyplot as plt
data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'
subjects_dir = data_path + '/subjects'
evoked_fname = meg_path + '/sample_audvis-ave.fif'
###############################################################################
# Do standard folder parsing (this can take a couple of minutes):
report = Report(image_format='png', subjects_dir=subjects_dir,
info_fname=evoked_fname, subject='sample')
report.parse_folder(meg_path)
###############################################################################
# Add a custom section with an evoked slider:
# Load the evoked data
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(None, 0), verbose=False)
evoked.crop(0, .2)
times = evoked.times[::4]
# Create a list of figs for the slider
figs = list()
for t in times:
figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,
show=False))
plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
image_format='svg')
# to save report
# report.save('foobar.html', True)
| bsd-3-clause |
hgaspar/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
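        # Draw per-image color noise along the principal components of the
        # RGB pixel distribution, scaled by their standard deviations -- this
        # matches the PCA-based color augmentation described by Krizhevsky
        # et al. (2012).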
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
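        # Net effect: data := (data + color_noise_coeff * noise) / (1 + color_noise_coeff)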
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
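                # 9 views: crops on a 3x3 grid of offsets (0, border_size,
                # 2*border_size) in each dimension, i.e. the four corners,
                # the four edge midpoints and the center crop.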
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
mmilutinovic1313/zipline-with-algorithms | zipline/finance/performance/position_tracker.py | 4 | 13048 | from __future__ import division
import logbook
import numpy as np
import pandas as pd
from pandas.lib import checknull
try:
# optional cython based OrderedDict
from cyordereddict import OrderedDict
except ImportError:
from collections import OrderedDict
from six import iteritems, itervalues
from zipline.finance.slippage import Transaction
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
import zipline.protocol as zp
from zipline.assets import (
Equity, Future
)
from zipline.finance.trading import with_environment
from . position import positiondict
log = logbook.Logger('Performance')
class PositionTracker(object):
def __init__(self):
# sid => position object
self.positions = positiondict()
# Arrays for quick calculations of positions value
self._position_amounts = OrderedDict()
self._position_last_sale_prices = OrderedDict()
self._position_value_multipliers = OrderedDict()
self._position_exposure_multipliers = OrderedDict()
self._position_payout_multipliers = OrderedDict()
self._unpaid_dividends = pd.DataFrame(
columns=zp.DIVIDEND_PAYMENT_FIELDS,
)
self._positions_store = zp.Positions()
@with_environment()
def _retrieve_asset(self, sid, env=None):
return env.asset_finder.retrieve_asset(sid)
def _update_multipliers(self, sid):
try:
self._position_value_multipliers[sid]
self._position_exposure_multipliers[sid]
self._position_payout_multipliers[sid]
except KeyError:
# Collect the value multipliers from applicable sids
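            # Equities count fully towards portfolio value and exposure and
            # have no per-tick cash payout; futures contribute no value
            # directly, while their exposure and mark-to-market payout scale
            # with the contract multiplier.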
asset = self._retrieve_asset(sid)
if isinstance(asset, Equity):
self._position_value_multipliers[sid] = 1
self._position_exposure_multipliers[sid] = 1
self._position_payout_multipliers[sid] = 0
if isinstance(asset, Future):
self._position_value_multipliers[sid] = 0
self._position_exposure_multipliers[sid] = \
asset.contract_multiplier
self._position_payout_multipliers[sid] = \
asset.contract_multiplier
def update_last_sale(self, event):
# NOTE, PerformanceTracker already vetted as TRADE type
sid = event.sid
if sid not in self.positions:
return 0
price = event.price
if checknull(price):
return 0
pos = self.positions[sid]
old_price = pos.last_sale_price
pos.last_sale_date = event.dt
pos.last_sale_price = price
self._position_last_sale_prices[sid] = price
# Calculate cash adjustment on assets with multipliers
return ((price - old_price) * self._position_payout_multipliers[sid]
* pos.amount)
def update_positions(self, positions):
# update positions in batch
self.positions.update(positions)
for sid, pos in iteritems(positions):
self._position_amounts[sid] = pos.amount
self._position_last_sale_prices[sid] = pos.last_sale_price
self._update_multipliers(sid)
def update_position(self, sid, amount=None, last_sale_price=None,
last_sale_date=None, cost_basis=None):
pos = self.positions[sid]
if amount is not None:
pos.amount = amount
self._position_amounts[sid] = amount
self._position_values = None # invalidate cache
self._update_multipliers(sid=sid)
if last_sale_price is not None:
pos.last_sale_price = last_sale_price
self._position_last_sale_prices[sid] = last_sale_price
self._position_values = None # invalidate cache
if last_sale_date is not None:
pos.last_sale_date = last_sale_date
if cost_basis is not None:
pos.cost_basis = cost_basis
def execute_transaction(self, txn):
# Update Position
# ----------------
sid = txn.sid
position = self.positions[sid]
position.update(txn)
self._position_amounts[sid] = position.amount
self._position_last_sale_prices[sid] = position.last_sale_price
self._update_multipliers(sid)
def handle_commission(self, commission):
# Adjust the cost basis of the stock if we own it
if commission.sid in self.positions:
self.positions[commission.sid].\
adjust_commission_cost_basis(commission)
@property
def position_values(self):
iter_amount_price_multiplier = zip(
itervalues(self._position_amounts),
itervalues(self._position_last_sale_prices),
itervalues(self._position_value_multipliers),
)
return [
price * amount * multiplier for
price, amount, multiplier in iter_amount_price_multiplier
]
@property
def position_exposures(self):
iter_amount_price_multiplier = zip(
itervalues(self._position_amounts),
itervalues(self._position_last_sale_prices),
itervalues(self._position_exposure_multipliers),
)
return [
price * amount * multiplier for
price, amount, multiplier in iter_amount_price_multiplier
]
def calculate_positions_value(self):
if len(self.position_values) == 0:
return np.float64(0)
return sum(self.position_values)
def calculate_positions_exposure(self):
if len(self.position_exposures) == 0:
return np.float64(0)
return sum(self.position_exposures)
def _longs_count(self):
return sum(1 for i in self.position_exposures if i > 0)
def _long_exposure(self):
return sum(i for i in self.position_exposures if i > 0)
def _long_value(self):
return sum(i for i in self.position_values if i > 0)
def _shorts_count(self):
return sum(1 for i in self.position_exposures if i < 0)
def _short_exposure(self):
return sum(i for i in self.position_exposures if i < 0)
def _short_value(self):
return sum(i for i in self.position_values if i < 0)
def _gross_exposure(self):
return self._long_exposure() + abs(self._short_exposure())
def _gross_value(self):
return self._long_value() + abs(self._short_value())
def _net_exposure(self):
return self.calculate_positions_exposure()
def _net_value(self):
return self.calculate_positions_value()
def handle_split(self, split):
if split.sid in self.positions:
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[split.sid]
leftover_cash = position.handle_split(split)
self._position_amounts[split.sid] = position.amount
self._position_last_sale_prices[split.sid] = \
position.last_sale_price
self._update_multipliers(split.sid)
return leftover_cash
def _maybe_earn_dividend(self, dividend):
"""
Take a historical dividend record and return a Series with fields in
zipline.protocol.DIVIDEND_FIELDS (plus an 'id' field) representing
the cash/stock amount we are owed when the dividend is paid.
"""
if dividend['sid'] in self.positions:
return self.positions[dividend['sid']].earn_dividend(dividend)
else:
return zp.dividend_payment()
def earn_dividends(self, dividend_frame):
"""
Given a frame of dividends whose ex_dates are all the next trading day,
calculate and store the cash and/or stock payments to be paid on each
dividend's pay date.
"""
earned = dividend_frame.apply(self._maybe_earn_dividend, axis=1)\
.dropna(how='all')
if len(earned) > 0:
# Store the earned dividends so that they can be paid on the
# dividends' pay_dates.
self._unpaid_dividends = pd.concat(
[self._unpaid_dividends, earned],
)
def _maybe_pay_dividend(self, dividend):
"""
Take a historical dividend record, look up any stored record of
cash/stock we are owed for that dividend, and return a Series
with fields drawn from zipline.protocol.DIVIDEND_PAYMENT_FIELDS.
"""
try:
unpaid_dividend = self._unpaid_dividends.loc[dividend['id']]
return unpaid_dividend
except KeyError:
return zp.dividend_payment()
def pay_dividends(self, dividend_frame):
"""
Given a frame of dividends whose pay_dates are all the next trading
day, grant the cash and/or stock payments that were calculated on the
given dividends' ex dates.
"""
payments = dividend_frame.apply(self._maybe_pay_dividend, axis=1)\
.dropna(how='all')
# Mark these dividends as paid by dropping them from our unpaid
# table.
self._unpaid_dividends.drop(payments.index)
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
stock_payments = payments[payments['payment_sid'].notnull()]
for _, row in stock_payments.iterrows():
stock = row['payment_sid']
share_count = row['share_count']
# note we create a Position for stock dividend if we don't
# already own the asset
position = self.positions[stock]
position.amount += share_count
self._position_amounts[stock] = position.amount
self._position_last_sale_prices[stock] = position.last_sale_price
self._update_multipliers(stock)
# Add cash equal to the net cash payed from all dividends. Note that
# "negative cash" is effectively paid if we're short an asset,
# representing the fact that we're required to reimburse the owner of
# the stock for any dividends paid while borrowing.
net_cash_payment = payments['cash_amount'].fillna(0).sum()
return net_cash_payment
def create_close_position_transaction(self, event):
if not self._position_amounts.get(event.sid):
return None
txn = Transaction(
sid=event.sid,
amount=(-1 * self._position_amounts[event.sid]),
dt=event.dt,
price=event.price,
commission=0,
order_id=0
)
return txn
def get_positions(self):
positions = self._positions_store
for sid, pos in iteritems(self.positions):
if pos.amount == 0:
# Clear out the position if it has become empty since the last
# time get_positions was called. Catching the KeyError is
# faster than checking `if sid in positions`, and this can be
# potentially called in a tight inner loop.
try:
del positions[sid]
except KeyError:
pass
continue
# Note that this will create a position if we don't currently have
# an entry
position = positions[sid]
position.amount = pos.amount
position.cost_basis = pos.cost_basis
position.last_sale_price = pos.last_sale_price
return positions
def get_positions_list(self):
positions = []
for sid, pos in iteritems(self.positions):
if pos.amount != 0:
positions.append(pos.to_dict())
return positions
def __getstate__(self):
state_dict = {}
state_dict['positions'] = dict(self.positions)
state_dict['unpaid_dividends'] = self._unpaid_dividends
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PositionTracker saved state is too old.")
self.positions = positiondict()
# note that positions_store is temporary and gets regened from
# .positions
self._positions_store = zp.Positions()
self._unpaid_dividends = state['unpaid_dividends']
# Arrays for quick calculations of positions value
self._position_amounts = OrderedDict()
self._position_last_sale_prices = OrderedDict()
self._position_value_multipliers = OrderedDict()
self._position_exposure_multipliers = OrderedDict()
self._position_payout_multipliers = OrderedDict()
self.update_positions(state['positions'])
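# A minimal arithmetic sketch of the cash adjustment that update_last_sale
# returns above: equities carry a payout multiplier of 0, so no cash moves
# on a price change, while futures pay out
# (new_price - old_price) * contract_multiplier * amount on each mark.
# The multiplier and prices below are illustrative assumptions.
def _futures_payout_sketch(old_price, new_price, amount, contract_multiplier):
    return (new_price - old_price) * contract_multiplier * amount
if __name__ == "__main__":
    # e.g. a 10-lot future with a 50x contract multiplier moving up by 0.25
    assert _futures_payout_sketch(100.00, 100.25, 10, 50) == 125.0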
| apache-2.0 |
arahuja/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
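# A minimal sketch of the on-disk layout that load_files expects (and that
# setup_load_files builds above): one sub-directory per category under a
# root folder, one text file per sample. The category and file names are
# illustrative assumptions.
def _make_load_files_layout_sketch():
    root = tempfile.mkdtemp(prefix="scikit_learn_load_files_sketch_")
    for category in ("spam", "ham"):
        cat_dir = os.path.join(root, category)
        os.mkdir(cat_dir)
        with open(os.path.join(cat_dir, "sample_0.txt"), "wb") as fh:
            fh.write(b("Hello World!\n"))
    return root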
| bsd-3-clause |
manojgudi/sandhi | modules/gr36/gr-filter/examples/fir_filter_ccc.py | 13 | 3154 | #!/usr/bin/env python
import sys
from gnuradio import gr, filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_out = gr.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
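# A rough scipy-only sketch of what the flowgraph above does: design a
# low-pass FIR from the same bandwidth/transition/attenuation spec and run
# complex Gaussian noise through it. A Kaiser-window design stands in for
# filter.firdes.low_pass_2 here, so the exact taps will differ from GNU
# Radio's; the defaults mirror the option defaults above.
def _scipy_lowpass_sketch(nsamps=10000, fs=8000.0, bw=1000.0, tw=100.0, atten=80.0):
    import numpy
    from scipy import signal
    ntaps, beta = signal.kaiserord(atten, tw / (fs / 2.0))
    taps = signal.firwin(ntaps, bw / (fs / 2.0), window=('kaiser', beta))
    noise = (numpy.random.randn(nsamps) + 1j * numpy.random.randn(nsamps)) / numpy.sqrt(2.0)
    return signal.lfilter(taps, [1.0], noise)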
| gpl-3.0 |
joannadiong/biosig | biosig/emg/process.py | 1 | 8599 | import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
def remove_mean(data, plot=False):
"""
Remove the mean from a recorded signal.
:param data: data
:type data: ndarray
:param plot: show plot of original and mean-removed data
:type plot: bool
:return: data with mean removed
:rtype: ndarray
"""
data_removedmean = data - np.mean(data)
if plot:
plt.clf()
plt.close()
plt.plot(data, label='original')
plt.plot(data_removedmean, label='removed mean')
plt.legend(loc='best')
plt.show()
else:
pass
return data_removedmean
def filter_bandpass(data, freq, highpass=30, lowpass=500, plot=False):
"""
Apply bandpass filter to a recorded signal (usually EMG).
:param data: data
:type data: ndarray
:param freq: sampling rate (Hz)
:type freq: int
:param highpass: high pass cut-off (Hz)
:type highpass: int
:param lowpass: low pass cut-off (Hz)
:type lowpass: int
:param plot: show plot of original and filtered data
:type plot: bool
:return: filtered data
:rtype: ndarray
"""
freq_nyq = freq/2
high, low = highpass/freq_nyq, lowpass/freq_nyq
b, a = signal.butter(4, [high, low], btype='bandpass')
data_filt = signal.filtfilt(b, a, data)
if plot:
plt.clf()
plt.close()
plt.plot(data, label='original')
plt.plot(data_filt, label='filtered')
plt.legend(loc='best')
plt.show()
else:
pass
return data_filt
def rectify(data, plot=False):
"""
Rectify a recorded signal (usually EMG) to get absolute values.
:param data: data
:type data: ndarray
:param plot: show plot of original and rectified data
:type plot: bool
:return: rectified data
:rtype: ndarray
"""
data_rect = abs(data)
if plot:
plt.clf()
plt.close()
plt.plot(data, label='original')
plt.plot(data_rect, label='rectified')
plt.legend(loc='best')
plt.show()
else:
pass
return data_rect
def find_mvc(data, plot=False):
"""
Find index and value of MVC EMG at maximal voltage.
:param data: data
:type data: ndarray
:param plot: show plot of data and MVC value
:type plot: bool
:return: index and value at MVC EMG or force
:rtype: int or float
"""
mvc_value = np.max(data)
mvc_index = int(np.where(data == mvc_value)[0][0])
if plot:
plt.clf()
plt.close()
plt.plot(data)
plt.plot(mvc_index, mvc_value, 'ro')
plt.show()
else:
pass
return mvc_index, mvc_value
def calc_halfwin(freq, window):
"""
For a given window of time (ms), calculate half of the window width (samples).
:param freq: sampling rate (Hz)
:type freq: int
:param window: window of time (ms)
:type window: int
:return: half of the window width (samples)
:rtype: int
"""
halfwin = round((window / 1000) / 2 * freq)
return halfwin
def calc_mvc(data, mvc_index, mvc_value, freq, window, type='rms', plot=False):
"""
Calculate average MVC EMG using the root-mean-square ('rms')
or using the mean over a window of time across the peak EMG ('mean').
:param data: data
:type data: ndarray
:param mvc_index: index at MVC EMG or force
:type mvc_index: int
:param mvc_value: value of MVC EMG or force
:type mvc_value: float
:param freq: sampling rate (Hz)
:type freq: int
:param window: window of time (ms)
:type window: int
:param type: mean or rms
:type type: str
:param plot: show plot of data and window across peak EMG
:type plot: bool
:return: MVC EMG
:rtype: float
"""
halfwin = calc_halfwin(freq, window)
if type == 'rms':
mvc = np.sqrt(np.mean((data[mvc_index - halfwin : mvc_index + halfwin]) ** 2))
elif type == 'mean':
mvc = np.mean(data[mvc_index - halfwin : mvc_index + halfwin])
if plot:
plt.clf()
plt.close()
plt.plot(data)
plt.plot([mvc_index - halfwin, mvc_index + halfwin],
[mvc_value + (mvc_value/10), mvc_value + (mvc_value/10)],
'r-', linewidth=4)
plt.show()
else:
pass
return mvc
def calc_rms(data, freq, window, plot=False):
"""
Process a recorded signal (usually EMG) using a moving root-mean-square window.
Example:
import numpy as np
import matplotlib.pyplot as plt
data = np.random.rand(10000,1)
freq, window = 2000, 50
data_rms = calc_rms(data, freq, window)
plt.plot(data)
plt.plot(data_rms)
plt.show()
:param data: data
:type data: ndarray
:param freq: sampling rate (Hz)
:type freq: int
:param window: window of time (ms)
:type window: int
:param plot: show plot of original and rms data
:type plot: bool
:return: RMS data
:rtype: ndarray
"""
halfwin = calc_halfwin(freq, window)
# Initialise variable.
data_rms = np.zeros(data.size)
# Loop through and compute normalised moving window.
# Window is smaller at the start and the end of the signal.
for i in range(data.size - 1):
if i < halfwin:
data_rms[i] = np.sqrt(np.mean((data[0 : i + halfwin]) ** 2))
elif i > data.size - halfwin:
data_rms[i] = np.sqrt(np.mean((data[i - halfwin : data.size - 1]) ** 2))
else:
data_rms[i] = np.sqrt(np.mean((data[i - halfwin : i + halfwin]) ** 2))
if plot:
plt.clf()
plt.close()
plt.plot(data, label='original')
plt.plot(data_rms, 'r-', label='rms')
plt.legend(loc='best')
plt.show()
else:
pass
return data_rms
def calc_mean(data, mvc, freq, window, plot=False):
"""
Process a recorded signal (usually EMG) using a moving average normalised to MVC.
Example:
import numpy as np
import matplotlib.pyplot as plt
data = np.random.rand(10000,1)
freq, window = 2000, 50
    mvc = np.max(data)
    data_mean = calc_mean(data, mvc, freq, window)
    plt.plot(data)
    plt.plot(data_mean)
plt.show()
:param data: data
:type data: ndarray
:param mvc: MVC EMG
:type mvc: float
:param freq: sampling rate (Hz)
:type freq: int
:param window: window of time (ms)
:type window: int
:param plot: show plot of original (V) and mean (%MVC) data
:type plot: bool
:return: mean data (% MVC)
:rtype: ndarray
"""
halfwin = calc_halfwin(freq, window)
# Initialise variable.
data_mean = np.zeros(data.size)
# Loop through and compute normalised moving window.
# Window is smaller at the start and the end of the signal.
for i in range(data.size - 1):
if i < halfwin:
data_mean[i] = np.mean(data[0: i + halfwin]) / mvc * 100
elif i > data.size - halfwin:
data_mean[i] = np.mean(data[i - halfwin: data.size - 1]) / mvc * 100
else:
data_mean[i] = np.mean(data[i - halfwin: i + halfwin]) / mvc * 100
if plot:
plt.clf()
plt.close()
fig, ax1 = plt.subplots()
ax1.plot(data, 'b-')
ax1.set_xlabel('sample')
ax1.set_ylabel('V', color='b')
ax1.tick_params('y', colors='b')
# plot two y-axes on same x-axis
ax2 = ax1.twinx()
ax2.plot(data_mean, 'r-')
ax2.set_ylabel('%MVC', color='r')
ax2.tick_params('y', colors='r')
fig.tight_layout()
plt.show()
else:
pass
return data_mean
def index_mvc_region(index_stim, index_mvc, freq, window):
"""
Where a supramaximal stimulation has been delivered for an interpolated twitch,
index a window at MVC and ensure any stimulation artefact is excluded.
:param index_stim: index of the stimulation event
:type index_stim: int
:param index_mvc: index of the MVC
    :type index_mvc: int
    :param freq: sampling rate (Hz)
    :type freq: int
:param window: window of time (ms)
:type window: int
:return: start and stop indices of window at MVC
:rtype: int
"""
halfwin = int((window / 1000) / 2 * freq)
# if MVC is close to stim, index a window up to the index just before stim
if abs(index_stim - index_mvc) < halfwin:
start, stop = index_stim - (2 * halfwin) - 1, index_stim - 1
# if MVC is distant from stim, index a window over the MVC
else:
start, stop = index_mvc - halfwin, index_mvc + halfwin
return start, stop
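# A minimal end-to-end sketch of how the helpers above chain together on a
# synthetic trace standing in for EMG: remove the offset, band-pass filter,
# rectify, normalise to the MVC found in the same trace, and compute a
# moving RMS. The sampling rate, window and noise amplitude are
# illustrative assumptions, not recommendations.
def _emg_pipeline_sketch(freq=2000, window=50):
    raw = np.random.randn(10 * freq) * 0.1  # ~10 s of noise as fake EMG
    emg = rectify(filter_bandpass(remove_mean(raw), freq))
    mvc_index, mvc_value = find_mvc(emg)
    mvc = calc_mvc(emg, mvc_index, mvc_value, freq, window, type='rms')
    data_rms = calc_rms(emg, freq, window)
    data_norm = calc_mean(emg, mvc, freq, window)
    return data_rms, data_norm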
| gpl-3.0 |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/projections/polar.py | 4 | 26829 | from __future__ import print_function
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform_non_affine(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
return xy
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def __getstate__(self):
return {}
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform_non_affine(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
    depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', 1)
self._default_theta_offset = kwargs.pop('theta_offset', 0)
self._default_theta_direction = kwargs.pop('theta_direction', 1)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(self._default_theta_offset)
self.set_theta_direction(self._default_theta_direction)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Calling polar_axes.xaxis.cla() or polar_axes.xaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
# axis so the gridlines from 0.0 to 1.0, now go from 0.0 to
# 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
def set_rscale(self, *args, **kwargs):
return Axes.set_yscale(self, *args, **kwargs)
def set_rticks(self, *args, **kwargs):
return Axes.set_yticks(self, *args, **kwargs)
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). e.g., 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
# Make sure we take into account unitized data
angles = self.convert_yunits(angles)
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
# Make sure we take into account unitized data
radii = self.convert_xunits(radii)
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# to keep things all self contained, we can put aliases to the Polar classes
# defined above. This isn't strictly necessary, but it makes some of the
# code more readable (and provides a backwards compatible Polar API)
PolarAxes.PolarTransform = PolarTransform
PolarAxes.PolarAffine = PolarAffine
PolarAxes.InvertedPolarTransform = InvertedPolarTransform
PolarAxes.ThetaFormatter = ThetaFormatter
PolarAxes.RadialLocator = RadialLocator
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
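# A minimal numpy sketch of the forward mapping PolarTransform implements
# above (ignoring the later affine scaling into axes coordinates): theta is
# flipped/offset, rmin is subtracted from r, negative radii are masked to
# NaN, and the result is the usual (r*cos(theta), r*sin(theta)). The
# rmin/offset/direction defaults are illustrative assumptions.
def _polar_forward_sketch(theta, r, rmin=0.0, theta_offset=0.0, theta_direction=1):
    t = np.asarray(theta) * theta_direction + theta_offset
    rr = np.asarray(r) - rmin
    mask = rr < 0
    x = np.where(mask, np.nan, rr * np.cos(t))
    y = np.where(mask, np.nan, rr * np.sin(t))
    return x, y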
| mit |
ericmjl/reassortment-simulation-and-reconstruction | simulator.py | 1 | 15373 | """
Author: Eric J. Ma
Affiliation: Massachusetts Institute of Technology
"""
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from random import randint
from Levenshtein import distance
from copy import deepcopy
import networkx as nx
class Simulator(object):
"""The Simulator class defines how a simulation run happens."""
def __init__(self):
"""
Initialize the simulator.
ATTRIBUTES:
- INT: current_time
The current time in the simulator. It is initialized to 0, and
can be reset to 0 using the reset() function.
- LIST: pathogens
A list of pathogens currently present in the simulator. It is
initialized to an empty list, and can be rest to an empty list
using the reset() function.
"""
super(Simulator, self).__init__()
self.current_time = 0
self.pathogens = []
self.node_draw_positions = None
# All the properties that need to be pre-computed are initialized to None here first. They are computed once.
self._reassortant_edges = None
self._relabeled_transmission_graph = None
self._full_transmission_graph = None
self._full_transmission_paths = None
self._transmission_paths = None
self._transmission_graph = None
def increment_timestep(self):
"""
This is the customizable part of the simulator. In the actual
simulation code, one has to subclass the Simulator and implement
increment_timestep. All other methods are available to the subclass.
NOTE: self.current_time is set in the run() function, not in the
increment_timestep() function. Do not change that logic here.
"""
pass
def run(self, timesteps):
"""
This method runs the simulation for as many timesteps are specified.
Nothing is returned.
INPUTS:
- INT: timesteps
The number of time steps that the simulation has to run.
"""
for i in range(1, timesteps):
self.current_time = i
self.increment_timestep()
def reset(self):
"""
This resets the Simulator object by setting:
- self.current_time: 0
- self.pathogens: [] #empty list
"""
self.current_time = 0
self.pathogens = []
def add_pathogens(self, pathogens):
"""
This method takes in a list of pathogens and adds it to the current
list of pathogens in the simulation.
INPUTS:
- ITERABLE: pathogens
A list of Pathogen objects that will be added to self.pathogens
"""
self.pathogens.extend(pathogens)
def write_sequences(self, outfile_name, folder_name=None):
"""
This method writes the sequences of every Pathogen object to a single
FASTA file.
INPUTS:
- STRING: outfile_name
The desired filename, without the ".fasta" extension.
- STRING: folder_name (optional)
The folder in which the FASTA files are going to be stored
"""
sequences = []
for pathogen in self.pathogens:
creation_time = pathogen.creation_time
for segment in pathogen.segments:
segment_number = segment.segment_number
sequence_string = Seq(segment.compute_sequence())
id_tuple = (pathogen.id, segment_number, creation_time)
sequence = SeqRecord(sequence_string, id="%s|%s|%s" % id_tuple)
sequences.append(sequence)
if folder_name == None:
output_handle = open('%s.fasta' % outfile_name, 'w+')
else:
output_handle = open('%s/%s.fasta' % (folder_name, outfile_name), \
'w+')
SeqIO.write(sequences, output_handle, 'fasta')
output_handle.close()
@property
def transmission_graph(self):
"""
This method creates the ground truth transmission graph in memory.
NOTE: The data structure of the edges has to match the data structure
of the edges in the reconstruction. As of 27 June 2014, Reconstructor
graph edges have the following dictionary attributes:
- 'segments' = [1, 4] (LIST: INT segment numbers)
- 'weight' = 9 (INT Levenshtein distance between two isolates)
NOTE: To draw to screen, call on draw_transmission_graph().
NOTE: We have defined a few helper functions to get the sequences of
each segment in a pathogen.
"""
#################### BEGIN HELPER FUNCTIONS ###########################
def edge_levenshtein_distance(parent, progeny, segments):
"""
This method computes the total Levenshtein distance over all
segments that were transmitted from parent to progeny. The parent
and progeny are Pathogen objects, and segments is a list of
integer numbers.
"""
lev_dist = 0
for segment in segments:
lev_dist += segment_levdist(parent, progeny, segment)
return lev_dist
def segment_levdist(pathogen1, pathogen2, segment_number):
"""
This method returns the Levenshtein distance between two
pathogens' stipulated segment.
"""
# Get the sequence for each of the segments
segment1 = pathogen1.get_segment(segment_number).compute_sequence()
segment2 = pathogen2.get_segment(segment_number).compute_sequence()
# Compute the Levenshtein distance
lev_dist = distance(segment1, segment2)
return lev_dist
#################### END HELPER FUNCTIONS #############################
#################### BEGIN MAIN LOGIC #################################
if self._transmission_graph == None:
transmission_graph = nx.DiGraph()
for pathogen in self.pathogens:
transmission_graph.add_node(pathogen, \
creation_time=pathogen.creation_time)
# Pass if the parent is empty - this means that the pathogen was a seed pathogen
if len(pathogen.parent) == 0:
pass
# Otherwise, add each edge with the weight.
else:
for parent, segments in pathogen.parent.items():
if len(segments) != 0:
weight = edge_levenshtein_distance(parent, pathogen, segments)
transmission_graph.add_edge(parent, pathogen, weight=weight, segments=segments)
self._transmission_graph = transmission_graph
return self._transmission_graph
#################### END MAIN LOGIC ###################################
def draw_transmission_graph(self, positions=False):
"""
This method draws the transmission graph to the screen using
matplotlib.
INPUTS:
- BOOLEAN: positions
If False: the circular layout will be drawn.
If True: nodes will be restricted in the x-axis.
"""
# Step 1: Guarantee that transmission_graph is made.
transmission_graph = deepcopy(self.relabeled_transmission_graph)
# Step 2: Draw the graph according to the time-restricted layout or
# circular layout.
if positions == False:
nx.draw_circular(transmission_graph)
if positions == True:
if self.node_draw_positions == None:
positions = dict()
for pathogen in self.pathogens:
positions[str(pathogen)] = (pathogen.creation_time, randint(0, 20))
self.node_draw_positions = positions
nx.draw(transmission_graph, pos=self.node_draw_positions)
def write_transmission_graph(self, outfile_name, folder_name=None):
"""
This method writes the ground truth transmission network as a NetworkX
pickle file.
INPUTS:
- STRING: outfile_name
The desired filename, without the ".gpickle" extension.
- STRING: folder_name (optional)
The folder in which the networkX gpickle files are going to be
stored
"""
transmission_graph = deepcopy(self.transmission_graph)
if folder_name == None:
output_handle = open('%s.gpickle' % outfile_name, 'w+')
else:
output_handle = open('%s/%s.gpickle' % (folder_name, outfile_name)\
, 'w+')
nx.write_gpickle(transmission_graph, output_handle)
def reassortants(self):
"""
This method will return the reassortant pathogens that are present in
the simulation. The reassortant pathogens are identifiable using their
        is_reassorted() method.
"""
reassortants = []
for pathogen in self.pathogens:
if pathogen.is_reassorted():
reassortants.append(pathogen)
return reassortants
@property
def reassortant_edges(self):
"""
This method will return the edges that connect to reassortants as a
list.
"""
if self._reassortant_edges == None:
edges = []
for reassortant in self.reassortants():
in_edges = \
self.relabeled_transmission_graph.in_edges(str(reassortant), data=True)
edges.extend(in_edges)
# for edge in in_edges:
# edges.append(edge)
self._reassortant_edges = edges
return self._reassortant_edges
@property
def relabeled_transmission_graph(self):
"""
This method will return a relabeled_transmission_graph, with the nodes
relabeled as strings rather than pathogen objects.
"""
if self._relabeled_transmission_graph == None:
            # Call on self.transmission_graph to guarantee that the graph is
# created.
transmission_graph = deepcopy(self.transmission_graph)
# Create mapping from object to string
mapping = dict()
for node in transmission_graph.nodes():
mapping[node] = str(node)
# Relabel the transmission graph in a copy of the transmission graph
relabeled = nx.relabel_nodes(transmission_graph, mapping)
self._relabeled_transmission_graph = relabeled
return self._relabeled_transmission_graph
@property
def full_transmission_graph(self):
"""
This method will generate the full transmission graph from the
relabeled transmission graph. This is done by iterating over the edges
present in the graph. If the progeny node in the edge is a
reassortant, then remove the edge. Otherwise, pass.
"""
if self._full_transmission_graph == None:
            # Call on relabeled_transmission_graph to guarantee that the graph is
# created.
full_graph = deepcopy(self.relabeled_transmission_graph)
# Identify the reassortants, and then recast them as a list of
# strings, rather than a list of objects.
reassortants = self.reassortants()
reassortants = [str(item) for item in reassortants]
for edge in full_graph.edges():
progeny = edge[1]
# This is the criteria for removal of an edge. If the progeny node
# is a reassortant, then the edge is not a full transmission edge.
# Therefore, the progeny node should not be in reassortant for the
# edge to be kept. Otherwise, the edge is removed.
if progeny in reassortants:
full_graph.remove_edge(edge[0], edge[1])
else:
pass
self._full_transmission_graph = full_graph
return self._full_transmission_graph
@property
def full_transmission_paths(self):
"""
This method will update and return the set of full transmission paths
between all nodes present in the simulation. The exact structure is a
disjoint set. We first create a singleton set for each node in the
transmission graph. We then iterate over each edge and union the sets
containing the nodes.
NOTE: We are going to use the relabeled transmission graph in this
case, rather than the original transmission graph, as this will yield
a set of strings that can be compared with the reconstruction, which
also uses strings as node labels.
OUTPUTS:
- LIST of SETS: paths
A list of disjoint sets, for which in each set, a path exists
between each of the nodes.
"""
if self._full_transmission_paths == None:
full_graph = self.full_transmission_graph
paths = identify_paths(full_graph)
self._full_transmission_paths = paths
return self._full_transmission_paths
def full_transmission_path_exists(self, node1, node2):
"""
This method will return True if a full transmission path exists
between two nodes.
INPUTS:
- STRING: node1, node2
The nodes that we are using for path identification are
strings. Therefore, node1 and node2 are strings. They are the
labels of the nodes present in the graph.
OUTPUTS:
- BOOLEAN result that tells us if a path exists between the two
nodes.
"""
boolean = False
paths = self.full_transmission_paths
for path in paths:
if node1 in path and node2 in path:
boolean = True
break
return boolean
def segment_transmission_graph(self, segment):
"""
This method will iterate over all of the edges in the relabeled
transmission graph, and if the edge's segment attribute does not
        contain the segment specified, then the edge will be removed.
INPUTS:
- INTEGER: segment
The segment number for which we want the segment transmission
graph.
OUTPUTS:
- NETWORKX DIGRAPH: seg_graph
The segment transmission graph.
"""
seg_graph = deepcopy(self.relabeled_transmission_graph)
        for edge in list(seg_graph.edges(data=True)):
if segment not in edge[2]['segments']:
seg_graph.remove_edge(edge[0], edge[1])
return seg_graph
def segment_transmission_paths(self, segment):
"""
This method will return the segment transmission paths for a specified
segment.
INPUTS:
- INTEGER: segment
An integer that specifies the segment for which the
transmission paths are to be found.
OUTPUTS:
- LIST of SETS: paths
A list of disjoint sets that describes which nodes are
connected by paths.
"""
seg_graph = self.segment_transmission_graph(segment=segment)
paths = identify_paths(seg_graph)
return paths
def segment_transmission_path_exists(self, node1, node2, segment):
"""
        This method will return True if a segment transmission path exists
        between two nodes for the specified segment.
INPUTS:
- STRING: node1, node2
The nodes that we are using for path identification are
strings. Therefore, node1 and node2 are strings. They are the
labels of the nodes present in the graph.
- INTEGER: segment
The segment number for which we want to know the segment
transmission path.
OUTPUTS:
- BOOLEAN result that tells us if a path exists between the two
nodes.
"""
boolean = False
paths = self.segment_transmission_paths(segment=segment)
for path in paths:
if node1 in path and node2 in path:
boolean = True
break
return boolean
#################### BEGIN HELPER METHODS FOR PATH FINDING ####################
def identify_paths(graph):
"""
This method takes in a graph and returns a list of sets that identify
which nodes have a path between them.
INPUTS:
- NETWORKX GRAPH: graph
The graph on which paths are to be found.
OUTPUTS:
- LIST of SETS: paths
A list of sets in which nodes that are within the same set have a
path between them.
"""
paths = []
# Step 1: Initialize singleton sets for each of the nodes in the
# transmission graph.
for node in graph.nodes():
paths.append(set([node]))
# Step 2: Iterate over all the edges. Find the sets that contain the
# two nodes, and union them.
for edge in graph.edges():
path1 = find_set_with_element(paths, edge[0])
path2 = find_set_with_element(paths, edge[1])
if path1 != path2:
new_path = path1.union(path2)
paths.pop(paths.index(path1))
paths.pop(paths.index(path2))
paths.append(new_path)
return paths
def find_set_with_element(paths, element):
"""
    This function will return the set that contains the specified
element.
INPUTS:
- LIST of SETS: paths
A list of sets in which nodes that are within the same set have a
path between them.
- NODE: element
A node within a NetworkX graph.
OUTPUTS:
- SET: path
The set of nodes that are connected with each other that contains
the query node 'element'.
"""
for path in paths:
if element in path:
return path
#################### END HELPER METHODS FOR PATH FINDING ######################
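if __name__ == '__main__':
    # Minimal sketch of the path-finding helpers above, assuming networkx is
    # installed; the node labels used here are purely hypothetical.
    import networkx as nx

    g = nx.DiGraph()
    g.add_edges_from([('A', 'B'), ('B', 'C'), ('D', 'E')])
    paths = identify_paths(g)
    # Two disjoint sets are expected: {'A', 'B', 'C'} and {'D', 'E'}.
    print(paths)
    # The set containing 'B' also contains 'A' and 'C', so a path exists
    # between those nodes.
    print(find_set_with_element(paths, 'B'))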
| mit |
soulmachine/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
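    # The entropy of each transduced label distribution measures how uncertain
    # the model is about a sample: a near-uniform distribution (high entropy)
    # marks an informative point, so the five highest entropies are selected
    # below for labeling.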
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
    delete_indices = np.array([], dtype=int)
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
adamgreenhall/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
correspond to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
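# Since the GP prediction at a point is Gaussian with mean y_pred and standard
# deviation sigma, the probability that the underlying value is negative is
# PHI(-y_pred / sigma); this is the quantity imaged and contoured below.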
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
fengzhyuan/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
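    # (the libsvm_sparse extension declared below links against the
    # 'libsvm-skl' library added earlier, hence the ordering requirement)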
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
copperwire/SIMS | plotting/plotting_module.py | 1 | 1820 | from file_handler import file_handler
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import numpy as np
class plotter:
def __init__(self, filename):
self.filename = filename
def pathfinder(self):
"""Find full path to filename """
def plot_machine(self):
class_instance = file_handler(self.filename)
class_instance.file_iteration()
data_sets = class_instance.data_conversion()
names = getattr(class_instance, "substances")
if len(names) > 2:
host = host_subplot(111, axes_class = AA.Axes)
plt.subplots_adjust(right = 0.75)
par1 = host.twinx()
par2 = host.twinx()
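			# host_subplot plus twinx give a single shared x-axis with up to
			# three independent log-scaled y-axes (one per substance); the
			# third axis is pushed outwards via the fixed-axis offset below.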
host.set_yscale("log")
par1.set_yscale("log")
par2.set_yscale("log")
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all = True)
host.set_xlabel(data_sets[0]["x_unit"])
plotty_things = [host, par1, par2]
for data_set, name, things in zip(data_sets, names, plotty_things):
x_val = data_set["data"]["x"]
y_val = data_set["data"]["y"]
x_unit = data_set["x_unit"]
y_unit = data_set["y_unit"]
things.set_ylabel(y_unit)
things.plot(x_val, y_val, label = data_set["sample element"])
plt.legend()
plt.show()
else:
data_set = data_sets[0]
x_val = data_set["data"][0]
y_val = data_set["data"][1]
x_val = x_val.copy(order = "C")
x_unit = data_set["x_unit"]
y_unit = data_set["y_unit"]
plt.semilogy(x_val, y_val, label = data_set["sample info"][2], nonposy = "clip")
plt.xlabel(x_unit)
plt.ylabel(y_unit)
plt.legend()
plt.show()
| cc0-1.0 |
jjx02230808/project0223 | examples/decomposition/plot_pca_vs_lda.py | 176 | 2027 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
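# Note that LDA is fit with the class labels y (supervised), whereas PCA above
# only sees X (unsupervised) -- this is the key contrast illustrated here.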
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
oscarbranson/apt-tools | notebooks/apt_importers.py | 1 | 4962 | import pandas as pd
import struct
def read_pos(f):
""" Loads an APT .pos file as a pandas dataframe.
Columns:
x: Reconstructed x position
y: Reconstructed y position
z: Reconstructed z position
Da: mass/charge ratio of ion"""
# read in the data
    with open(f, 'rb') as fh:
        raw = fh.read()
    n = len(raw) // 4
    d = struct.unpack('>' + 'f' * n, raw[:4 * n])
# '>' denotes 'big-endian' byte order
# unpack data
pos = pd.DataFrame({'x': d[0::4],
'y': d[1::4],
'z': d[2::4],
'Da': d[3::4]})
return pos
def read_epos(f):
"""Loads an APT .epos file as a pandas dataframe.
Columns:
x: Reconstructed x position
y: Reconstructed y position
z: Reconstructed z position
Da: Mass/charge ratio of ion
ns: Ion Time Of Flight
DC_kV: Potential
pulse_kV: Size of voltage pulse (voltage pulsing mode only)
det_x: Detector x position
det_y: Detector y position
pslep: Pulses since last event pulse (i.e. ionisation rate)
ipp: Ions per pulse (multihits)
[x,y,z,Da,ns,DC_kV,pulse_kV,det_x,det_y,pslep,ipp].
pslep = pulses since last event pulse
ipp = ions per pulse
When more than one ion is recorded for a given pulse, only the
    first event will have an entry in the "Pulses since last event
pulse" column. Each subsequent event for that pulse will have
an entry of zero because no additional pulser firings occurred
before that event was recorded. Likewise, the "Ions Per Pulse"
column will contain the total number of recorded ion events for
a given pulse. This is normally one, but for a sequence of records
    describing a pulse with multiply recorded ions, the first ion record will
have the total number of ions measured in that pulse, while the
remaining records for that pulse will have 0 for the Ions Per
Pulse value.
~ Appendix A of 'Atom Probe tomography: A Users Guide',
notes on ePOS format."""
# read in the data
    with open(f, 'rb') as fh:
        raw = fh.read()
    n = len(raw) // 4
    rs = n // 11
    d = struct.unpack('>' + 'fffffffffII' * rs, raw[:4 * 11 * rs])
# '>' denotes 'big-endian' byte order
# unpack data
pos = pd.DataFrame({'x': d[0::11],
'y': d[1::11],
'z': d[2::11],
'Da': d[3::11],
'ns': d[4::11],
'DC_kV': d[5::11],
'pulse_kV': d[6::11],
'det_x': d[7::11],
'det_y': d[8::11],
'pslep': d[9::11], # pulses since last event pulse
'ipp': d[10::11]}) # ions per pulse
return pos
def read_rrng(f):
"""Loads a .rrng file produced by IVAS. Returns two dataframes of 'ions'
and 'ranges'."""
import re
    with open(f, 'r') as fh:
        rf = fh.readlines()
patterns = re.compile(r'Ion([0-9]+)=([A-Za-z0-9]+).*|Range([0-9]+)=(\d+.\d+) +(\d+.\d+) +Vol:(\d+.\d+) +([A-Za-z:0-9 ]+) +Color:([A-Z0-9]{6})')
ions = []
rrngs = []
for line in rf:
m = patterns.search(line)
if m:
if m.groups()[0] is not None:
ions.append(m.groups()[:2])
else:
rrngs.append(m.groups()[2:])
ions = pd.DataFrame(ions, columns=['number','name'])
ions.set_index('number',inplace=True)
rrngs = pd.DataFrame(rrngs, columns=['number','lower','upper','vol','comp','colour'])
rrngs.set_index('number',inplace=True)
rrngs[['lower','upper','vol']] = rrngs[['lower','upper','vol']].astype(float)
rrngs[['comp','colour']] = rrngs[['comp','colour']].astype(str)
return ions,rrngs
def label_ions(pos,rrngs):
"""labels ions in a .pos or .epos dataframe (anything with a 'Da' column)
with composition and colour, based on an imported .rrng file."""
pos['comp'] = ''
pos['colour'] = '#FFFFFF'
for n,r in rrngs.iterrows():
pos.loc[(pos.Da >= r.lower) & (pos.Da <= r.upper),['comp','colour']] = [r['comp'],'#' + r['colour']]
return pos
def deconvolve(lpos):
"""Takes a composition-labelled pos file, and deconvolves
the complex ions. Produces a dataframe of the same input format
with the extra columns:
'element': element name
'n': stoichiometry
For complex ions, the location of the different components is not
altered - i.e. xyz position will be the same for several elements."""
import re
out = []
pattern = re.compile(r'([A-Za-z]+):([0-9]+)')
for g,d in lpos.groupby('comp'):
        if g != '':
for i in range(len(g.split(' '))):
tmp = d.copy()
cn = pattern.search(g.split(' ')[i]).groups()
tmp['element'] = cn[0]
tmp['n'] = cn[1]
out.append(tmp.copy())
return pd.concat(out)
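if __name__ == '__main__':
    # Minimal usage sketch chaining the helpers above. The file names are
    # hypothetical -- point them at your own .pos and .rrng data.
    pos = read_pos('example.pos')
    ions, rrngs = read_rrng('example.rrng')
    lpos = label_ions(pos, rrngs)
    dpos = deconvolve(lpos)
    print(dpos.head())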
| gpl-2.0 |
harshaneelhg/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
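# The hashing trick gives a fixed-size feature space (2 ** 18 dimensions here)
# without keeping a vocabulary in memory, so every mini-batch -- even one that
# introduces previously unseen words -- is projected into the same space that
# partial_fit expects.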
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batchs of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
VoigtLab/dnaplotlib | gallery/sbol_visual/sbol_visual.py | 1 | 1222 | __author__ = 'user'
import dnaplotlib as dpl
import dnaplotlib.sbol as dpl_sbol
import sbol as sbol
import matplotlib.pyplot as plt
# Import the SBOL design file
doc = sbol.Document()
doc.read('gene_cassette.sbol')
# In this case, we know ahead of time the URI of the design. In some cases, you may have to explore the doc's components to find the design you are looking for
design = doc.components['http://sbolstandard.org/examples/Design']
print(design.display_id)
print(design)
# Create the DNAplotlib renderer
dr = dpl_sbol.SBOLRenderer()
# Instantiate the part renderers
part_renderers = dr.SBOL_part_renderers()
# Create the figure
fig = plt.figure()
ax = plt.gca()
start, end = dr.renderSBOL(ax, design, part_renderers) # Render SBOL. This function has parallel structure to renderDNA
# Give the figure a title
dpl.write_label(ax, design.display_id, (start+end)/2, { 'label_size' : 18, 'label_y_offset': 12 })
# Configure plot
ax.set_xlim([start, end])
ax.set_ylim([-18,18])
ax.set_aspect('equal')
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
# Save the figure
fig.savefig('sbol_visual.pdf', transparent=True)
fig.savefig('sbol_visual.png', dpi=300)
# Clear the plotting cache
#plt.close('all')
plt.show()
| mit |
q1ang/tushare | tushare/stock/classifying.py | 11 | 8914 | # -*- coding:utf-8 -*-
"""
Interfaces for fetching stock classification data
Created on 2015/02/01
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
import json
import re
from pandas.util.testing import _network_error_classes
import time
import tushare.stock.fundamental as fd
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_industry_classified():
"""
    Fetch industry classification data
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        c_name :industry name
"""
df = _get_type_data(ct.SINA_INDUSTRY_INDEX_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['ids']))
data = []
ct._write_head()
for row in df.values:
rowDf = _get_detail(row[0])
rowDf['c_name'] = row[1]
data.append(rowDf)
data = pd.concat(data, ignore_index=True)
return data
def get_concept_classified():
"""
    Fetch concept classification data
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        c_name :concept name
"""
ct._write_head()
df = _get_type_data(ct.SINA_CONCEPTS_INDEX_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sf'], ct.PAGES['cpt']))
data = []
for row in df.values:
rowDf = _get_detail(row[0])
rowDf['c_name'] = row[1]
data.append(rowDf)
data = pd.concat(data,ignore_index=True)
return data
def get_area_classified():
"""
    Fetch region classification data
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        area :region name
"""
df = fd.get_stock_basics()
df = df[['name', 'area']]
df.reset_index(level=0, inplace=True)
df = df.sort('area').reset_index(drop=True)
return df
def get_gem_classified():
"""
    Fetch ChiNext (growth enterprise market) stocks
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
df = fd.get_stock_basics()
df.reset_index(level=0, inplace=True)
df = df[ct.FOR_CLASSIFY_B_COLS]
df = df.ix[df.code.str[0] == '3']
df = df.sort('code').reset_index(drop=True)
return df
def get_sme_classified():
"""
    Fetch SME (small and medium enterprise) board stocks
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
df = fd.get_stock_basics()
df.reset_index(level=0, inplace=True)
df = df[ct.FOR_CLASSIFY_B_COLS]
df = df.ix[df.code.str[0:3] == '002']
df = df.sort('code').reset_index(drop=True)
return df
def get_st_classified():
"""
    Fetch risk-warning (ST) board stocks
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
df = fd.get_stock_basics()
df.reset_index(level=0, inplace=True)
df = df[ct.FOR_CLASSIFY_B_COLS]
df = df.ix[df.name.str.contains('ST')]
df = df.sort('code').reset_index(drop=True)
return df
def _get_detail(tag, retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
try:
ct._write_console()
request = Request(ct.SINA_DATA_DETAIL_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['jv'],
tag))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
except _network_error_classes:
pass
else:
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
jstr = json.dumps(text)
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}), columns=ct.THE_FIELDS)
df = df[ct.FOR_CLASSIFY_B_COLS]
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _get_type_data(url):
try:
request = Request(url)
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str.split('=')[1]
data_json = json.loads(data_str)
df = pd.DataFrame([[row.split(',')[0], row.split(',')[1]] for row in data_json.values()],
columns=['tag', 'name'])
return df
except Exception as er:
print(str(er))
def get_hs300s():
"""
    Fetch the current CSI 300 (HS300) constituents and their weights
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        date :date
        weight:weight
"""
try:
df = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.INDEX_C_COMM, ct.PAGES['hs300b']), parse_cols=[0,1])
df.columns = ct.FOR_CLASSIFY_B_COLS
df['code'] = df['code'].map(lambda x :str(x).zfill(6))
wt = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.INDEX_C_COMM, ct.PAGES['hs300w']), parse_cols=[0,4,8])
wt.columns = ct.FOR_CLASSIFY_W_COLS
wt['code'] = wt['code'].map(lambda x :str(x).zfill(6))
return pd.merge(df,wt)
except Exception as er:
print(str(er))
def get_sz50s():
"""
    Fetch the SSE 50 constituents
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
try:
df = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.INDEX_C_COMM, ct.PAGES['sz50b']), parse_cols=[0,1])
df.columns = ct.FOR_CLASSIFY_B_COLS
df['code'] = df['code'].map(lambda x :str(x).zfill(6))
return df
except Exception as er:
print(str(er))
def get_zz500s():
"""
    Fetch the CSI 500 constituents
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
"""
try:
df = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.INDEX_C_COMM, ct.PAGES['zz500b']), parse_cols=[0,1])
df.columns = ct.FOR_CLASSIFY_B_COLS
df['code'] = df['code'].map(lambda x :str(x).zfill(6))
return df
except Exception as er:
print(str(er))
def get_terminated():
"""
    Fetch the list of delisted (terminated) stocks
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        oDate:listing date
        tDate:delisting date
"""
try:
ref = ct.SSEQ_CQ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(rv.TERMINATED_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['ssecq'], _random(5),
_random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
df = pd.DataFrame(lines['result'], columns=rv.TERMINATED_T_COLS)
df.columns = rv.TERMINATED_COLS
return df
except Exception as er:
print(str(er))
def get_suspended():
"""
    Fetch the list of stocks with suspended listing
    Return
    --------
    DataFrame
        code :stock code
        name :stock name
        oDate:listing date
        tDate:delisting date
"""
try:
ref = ct.SSEQ_CQ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(rv.SUSPENDED_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['ssecq'], _random(5),
_random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
df = pd.DataFrame(lines['result'], columns=rv.TERMINATED_T_COLS)
df.columns = rv.TERMINATED_COLS
return df
except Exception as er:
print(str(er))
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
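if __name__ == '__main__':
    # Minimal usage sketch; these calls fetch data from the remote providers,
    # so a working network connection is required.
    print(get_sz50s())
    print(get_terminated())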
| bsd-3-clause |
sabi0/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_vars.py | 2 | 22154 | """ pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import pickle
from _pydevd_bundle.pydevd_constants import get_frame, get_thread_id, xrange
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
from _pydev_imps._pydev_saved_modules import thread
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, quote, execfile
from _pydevd_bundle.pydevd_utils import to_string
SENTINEL_VALUE = []
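# SENTINEL_VALUE is the default for the optional ``value`` argument of
# change_attr_expression() below, so that an explicit None passed by the caller
# can be distinguished from "no value given" (the check is done with ``is``).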
# ------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError): pass
class FrameNotFoundError(RuntimeError): pass
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != get_thread_id(threading.currentThread()):
raise VariableError("find_frame: must execute on same thread")
curFrame = get_frame()
for frame in _iter_frames(curFrame):
sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
lock = thread.allocate_lock()
additional_frames = {} # dict of dicts
def add_additional_frame_by_id(thread_id, frames_by_id):
AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
addAdditionalFrameById = add_additional_frame_by_id # Backward compatibility
def remove_additional_frame_by_id(thread_id):
del AdditionalFramesContainer.additional_frames[thread_id]
removeAdditionalFrameById = remove_additional_frame_by_id # Backward compatibility
def has_additional_frames_by_id(thread_id):
return thread_id in AdditionalFramesContainer.additional_frames
def get_additional_frames_by_id(thread_id):
return AdditionalFramesContainer.additional_frames.get(thread_id)
def find_frame(thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
try:
curr_thread_id = get_thread_id(threading.currentThread())
if thread_id != curr_thread_id:
try:
return get_custom_frame(thread_id, frame_id) # I.e.: thread_id could be a stackless frame id + thread_id.
except:
pass
raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))
lookingFor = int(frame_id)
if AdditionalFramesContainer.additional_frames:
if thread_id in AdditionalFramesContainer.additional_frames:
frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
if frame is not None:
return frame
curFrame = get_frame()
if frame_id == "*":
return curFrame # any frame is specified with "*"
frameFound = None
for frame in _iter_frames(curFrame):
if lookingFor == id(frame):
frameFound = frame
del frame
break
del frame
# Important: python can hold a reference to the frame from the current context
# if an exception is raised, so, if we don't explicitly add those deletes
# we might have those variables living much more than we'd want to.
# I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
# need to call sys.exc_clear())
del curFrame
if frameFound is None:
msgFrames = ''
i = 0
for frame in _iter_frames(get_frame()):
i += 1
msgFrames += str(id(frame))
if i % 5 == 0:
msgFrames += '\n'
else:
msgFrames += ' - '
errMsg = '''find_frame: frame not found.
Looking for thread_id:%s, frame_id:%s
Current thread_id:%s, available frames:
%s\n
''' % (thread_id, lookingFor, curr_thread_id, msgFrames)
sys.stderr.write(errMsg)
return None
return frameFound
except:
import traceback
traceback.print_exc()
return None
def getVariable(thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
"""
if scope == 'BY_ID':
if thread_id != get_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
            # If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
for attr in attrList:
attr.replace("@_@TAB_CHAR@_@", '\t')
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
                # An Expression can be in any scope (globals/locals), therefore it needs to be evaluated as an expression
var = evaluate_expression(thread_id, frame_id, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_variable_fields(thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of variables's fields
"""
var = getVariable(thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(var)
except:
sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
thread_id, frame_id, scope, attrs,))
traceback.print_exc()
def resolve_var_object(var, attrs):
"""
Resolve variable's attribute
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a value of resolved variable's attribute
"""
if attrs is not None:
attr_list = attrs.split('\t')
else:
attr_list = []
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_var_object_fields(var, attrs):
"""
Resolve compound variable by its object and attributes
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a dictionary of variables's fields
"""
attr_list = attrs.split('\t')
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
try:
type, _typeName, resolver = get_type(var)
return resolver.get_dictionary(var)
except:
traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
"""
We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.
code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
"""
expressionValue = getVariable(thread_id, frame_id, scope, attrs)
try:
namespace = {'__name__': '<custom_operation>'}
if style == "EXECFILE":
namespace['__file__'] = code_or_file
execfile(code_or_file, namespace, namespace)
else: # style == EXEC
namespace['__file__'] = '<customOperationCode>'
Exec(code_or_file, namespace, namespace)
return str(namespace[operation_fn_name](expressionValue))
except:
traceback.print_exc()
def eval_in_context(expression, globals, locals):
result = None
try:
result = eval(expression, globals, locals)
except Exception:
s = StringIO()
traceback.print_exc(file=s)
result = s.getvalue()
try:
try:
etype, value, tb = sys.exc_info()
result = value
finally:
etype = value = tb = None
except:
pass
result = ExceptionOnEvaluate(result)
# Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
try:
if '__' in expression:
# Try to handle '__' name mangling...
split = expression.split('.')
curr = locals.get(split[0])
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
entry = '_%s%s' % (curr.__class__.__name__, entry)
curr = getattr(curr, entry)
result = curr
except:
pass
return result
def evaluate_expression(thread_id, frame_id, expression, doExec):
'''returns the result of the evaluated expression
@param doExec: determines if we should do an exec or an eval
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
try:
expression = str(expression.replace('@LINE@', '\n'))
if doExec:
try:
# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
compiled = compile(expression, '<string>', 'eval')
except:
Exec(expression, updated_globals, frame.f_locals)
pydevd_save_locals.save_locals(frame)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):
'''Changes some attribute in a given frame.
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
try:
expression = expression.replace('@LINE@', '\n')
if dbg.plugin and value is SENTINEL_VALUE:
result = dbg.plugin.change_variable(frame, attr, expression)
if result:
return result
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
frame.f_globals[attr] = value
return frame.f_globals[attr]
else:
if pydevd_save_locals.is_save_locals_available():
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
frame.f_locals[attr] = value
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
# default way (only works for changing it in the topmost frame)
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
result = value
Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
return result
except Exception:
traceback.print_exc()
MAXIMUM_ARRAY_SIZE = 100
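# Cap on the number of rows/columns serialized in a single response: the
# array/dataframe views below always clamp the requested slice with
# min(..., MAXIMUM_ARRAY_SIZE) so that large structures are paged by the IDE.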
def array_to_xml(array, name, roffset, coffset, rows, cols, format):
array, xml, r, c, f = array_to_meta_xml(array, name, format)
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
def get_value(row, col):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
value = array[(col if rows == 1 else row)]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
return value
xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)))
return xml
def array_to_meta_xml(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise Exception("%s has more than 2 dimensions." % slice)
elif l == 1:
# special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
# http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
# explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
# we use kind of a hack - get information about memory from C_CONTIGUOUS
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = len(array)
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = len(array)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = array.shape[-2]
cols = array.shape[-1]
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in "biufc":
bounds = (array.min(), array.max())
return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format
def array_default_format(type):
if type == 'f':
return '.5f'
elif type == 'i' or type == 'u':
return 'd'
else:
return 's'
def get_label(label):
return str(label) if not isinstance(label, tuple) else '/'.join(map(str, label))
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
dim = len(df.axes)
num_rows = df.shape[0]
num_cols = df.shape[1] if dim > 1 else 1
xml = slice_to_xml(name, num_rows, num_cols, "", "", (0, 0))
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
dtypes = [None] * cols
if dim > 1:
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
dtypes[col] = dtype
if dtype in "biufc":
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
else:
dtype = df.dtype.kind
dtypes[0] = dtype
col_bounds[0] = (df.min(), df.max()) if dtype in "biufc" else (0, 0)
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
rows = df.shape[0]
cols = df.shape[1] if dim > 1 else 1
format = format.replace('%', '')
def col_to_format(c):
return format if dtypes[c] == 'f' and format else array_default_format(dtypes[c])
xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
xml += array_data_to_xml(rows, cols, lambda r: (("%" + col_to_format(c)) % (df.iat[r, c] if dim > 1 else df.iat[r])
for c in range(cols)))
return xml
def array_data_to_xml(rows, cols, get_row):
xml = "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>\n" % to_string(row)
for value in get_row(row):
xml += var_to_xml(value, '')
return xml
def slice_to_xml(slice, rows, cols, format, type, bounds):
return '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
(slice, rows, cols, format, type, bounds[1], bounds[0])
def header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
xml = "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
for col in range(cols):
col_label = get_label(df.axes[1].values[col]) if dim > 1 else str(col)
bounds = col_bounds[col]
col_format = "%" + col_to_format(col)
xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
(str(col), col_label, dtypes[col], col_to_format(col), col_format % bounds[1], col_format % bounds[0])
for row in range(rows):
xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % (str(row), get_label(df.axes[0].values[row]))
xml += "</headerdata>\n"
return xml
TYPE_TO_XML_CONVERTERS = {"ndarray": array_to_xml, "DataFrame": dataframe_to_xml, "Series": dataframe_to_xml}
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
_, type_name, _ = get_type(array)
if type_name in TYPE_TO_XML_CONVERTERS:
return "<xml>%s</xml>" % TYPE_TO_XML_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)
else:
raise VariableError("type %s not supported" % type_name)
| apache-2.0 |
potash/scikit-learn | sklearn/decomposition/__init__.py | 76 | 1490 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/metrics/__init__.py | 13 | 3826 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics,
pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import balanced_accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import mean_squared_log_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import check_scoring
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'balanced_accuracy_score',
'calinski_harabaz_score',
'check_scoring',
'classification_report',
'cluster',
'cohen_kappa_score',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'fowlkes_mallows_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
keelanfh/electionary | analysis/sentiment-time-party-graph.py | 1 | 2466 | import csv
import json
import os
import matplotlib.pyplot as plt
import commonfunctions as cf
root_directory = os.path.abspath(os.path.dirname(os.path.abspath(os.curdir)))
directory = os.path.join(root_directory, cf.working_directory)
with open('sentiment-time-party.json', 'r') as f:
data = json.load(f)
r, d = None, None
for datum in data:
if datum['party'] == 'r':
r = datum['year_results']
if datum['party'] == 'd':
d = datum['year_results']
repPositiveData = [(x['year'], x['positive']) for x in r]
repNegativeData = [(x['year'], x['negative']) for x in r]
demPositiveData = [(x['year'], x['positive']) for x in d]
demNegativeData = [(x['year'], x['negative']) for x in d]
years = sorted(set([x['year'] for x in r]).union([x['year'] for x in d]))  # list.sort() returns None, so use sorted()
# This bit writes to a file. Useful if you want a table of results
with open('results.csv', 'w') as f:
dw = csv.DictWriter(f, r[0].keys())
    csv.writer(f).writerow(['Sentiment in Republican Debates'])
dw.writeheader()
dw.writerows(r)
    csv.writer(f).writerow(['Sentiment in Democrat Debates'])
dw.writeheader()
dw.writerows(d)
plt.style.use('ggplot')
fig = plt.figure(0)
ax = fig.gca()
ax.grid(b=False)
ax.set_axis_bgcolor('white')
labels = ['Republican', 'Democrat']
colors = ['#ef8a62', '#67a9cf']
for labelno, data in enumerate([repNegativeData, demNegativeData]):
data2 = zip(*data)
ax.plot(data2[0], data2[1], label=labels[labelno], color=colors[labelno], lw=2.5)
ax.set_xlim([1956, 2020])
ax.set_xticks(xrange(1960, 2020, 8))
ax.legend()
ax.set_xlabel('Year')
ax.set_ylabel('Proportion of words in dictionaries')
ax.set_title('Negative Sentiment over time in US democratic/republican election debates', y=1.05)
plt.savefig(os.path.join(root_directory, 'images', 'analysis-sentiment-time-party-negative.svg'), format='svg')
fig = plt.figure(1)
ax = fig.gca()
ax.grid(b=False)
ax.set_axis_bgcolor('white')
ax.set_xlim([1956, 2020])
ax.set_xticks(xrange(1960, 2020, 8))
for labelno, data in enumerate([repPositiveData, demPositiveData]):
data2 = zip(*data)
ax.plot(data2[0], data2[1], color=colors[labelno], label=labels[labelno], lw=2.5)
ax.legend()
ax.set_xlabel('Year')
ax.set_ylabel('Proportion of words in dictionaries')
ax.set_title('Positive Sentiment over time in US democratic/republican election debates', y=1.05)
plt.savefig(os.path.join(root_directory, 'images', 'analysis-sentiment-time-party-positive.svg'), format='svg')
| mit |
IshankGulati/scikit-learn | examples/hetero_feature_union.py | 81 | 6241 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to scikit-learn feature
    matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
Alexander-P/Isca | src/extra/python/scripts/gfdl_grid_files/grid_file_generator.py | 4 | 1170 | import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# specify resolution
t_res = 42
# read in grid from the appropriate file
resolution_file = Dataset('t'+str(t_res)+'_atmos_daily.nc', 'r', format='NETCDF3_CLASSIC')
lons = resolution_file.variables['lon'][:]
lats = resolution_file.variables['lat'][:]
lonsb = resolution_file.variables['lonb'][:]
latsb = resolution_file.variables['latb'][:]
nlon=lons.shape[0]
nlat=lats.shape[0]
nlonb=lonsb.shape[0]
nlatb=latsb.shape[0]
output_file = Dataset('t'+str(t_res)+'.nc', 'w', format='NETCDF3_CLASSIC')
lat = output_file.createDimension('lat', nlat)
lon = output_file.createDimension('lon', nlon)
latb = output_file.createDimension('latb', nlatb)
lonb = output_file.createDimension('lonb', nlonb)
latitudes = output_file.createVariable('lat','f4',('lat',))
longitudes = output_file.createVariable('lon','f4',('lon',))
latitudesb = output_file.createVariable('latb','f4',('latb',))
longitudesb = output_file.createVariable('lonb','f4',('lonb',))
latitudes[:] = lats
longitudes[:] = lons
latitudesb[:] = latsb
longitudesb[:] = lonsb
output_file.close()
| gpl-3.0 |
nagamanicg/ml_lab_ecsc_306 | labwork/lab7/sci-learn/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| apache-2.0 |
astocko/statsmodels | statsmodels/stats/sandwich_covariance.py | 6 | 27945 | # -*- coding: utf-8 -*-
"""Sandwich covariance estimators
Created on Sun Nov 27 14:10:57 2011
Author: Josef Perktold
Author: Skipper Seabold for HCxxx in linear_model.RegressionResults
License: BSD-3
Notes
-----
for calculating it, we have two versions
version 1: use pinv
pinv(x) scale pinv(x) used currently in linear_model, with scale is
1d (or diagonal matrix)
(x'x)^(-1) x' scale x (x'x)^(-1), scale in general is (nobs, nobs) so
pretty large
general formulas for scale in cluster case are in
http://pubs.amstat.org/doi/abstract/10.1198/jbes.2010.07136 which also
has the second version
version 2:
(x'x)^(-1) S (x'x)^(-1) with S = x' scale x, S is (kvar,kvars),
(x'x)^(-1) is available as normalized_covparams.
S = sum (x*u) dot (x*u)' = sum x*u*u'*x' where sum here can aggregate
over observations or groups. u is regression residual.
x is (nobs, k_var)
u is (nobs, 1)
x*u is (nobs, k_var)
For cluster robust standard errors, we first sum (x*w) over other groups
(including time) and then take the dot product (sum of outer products)
S = sum_g(x*u)' dot sum_g(x*u)
For HAC by clusters, we first sum over groups for each time period, and then
use HAC on the group sums of (x*w).
If we have several groups, we have to sum first over all relevant groups, and
then take the outer product sum. This can be done by summing using indicator
functions or matrices or with explicit loops. Alternatively we calculate
separate covariance matrices for each group, sum them and subtract the
duplicate counted intersection.
Not checked in details yet: degrees of freedom or small sample correction
factors, see (two) references (?)
This is the general case for MLE and GMM also
in MLE hessian H, outerproduct of jacobian S, cov_hjjh = HJJH,
which reduces to the above in the linear case, but can be used
generally, e.g. in discrete, and is misnomed in GenericLikelihoodModel
in GMM it's similar but I would have to look up the details, (it comes
out in sandwich form by default, it's in the sandbox), standard Newey
West or similar are on the covariance matrix of the moment conditions
quasi-MLE: MLE with mis-specified model where parameter estimates are
fine (consistent ?) but cov_params needs to be adjusted similar or
same as in sandwiches. (I didn't go through any details yet.)
TODO
----
* small sample correction factors, Done for cluster, not yet for HAC
* automatic lag-length selection for Newey-West HAC,
-> added: nlag = floor[4(T/100)^(2/9)] Reference: xtscc paper, Newey-West
note this will not be optimal in the panel context, see Peterson
* HAC should maybe return the chosen nlags
* get consistent notation, varies by paper, S, scale, sigma?
* replace diag(hat_matrix) calculations in cov_hc2, cov_hc3
References
----------
John C. Driscoll and Aart C. Kraay, “Consistent Covariance Matrix Estimation
with Spatially Dependent Panel Data,” Review of Economics and Statistics 80,
no. 4 (1998): 549-560.
Daniel Hoechle, "Robust Standard Errors for Panel Regressions with
Cross-Sectional Dependence", The Stata Journal
Mitchell A. Petersen, “Estimating Standard Errors in Finance Panel Data
Sets: Comparing Approaches,” Review of Financial Studies 22, no. 1
(January 1, 2009): 435 -480.
A. Colin Cameron, Jonah B. Gelbach, and Douglas L. Miller, “Robust Inference
With Multiway Clustering,” Journal of Business and Economic Statistics 29
(April 2011): 238-249.
not used yet:
A.C. Cameron, J.B. Gelbach, and D.L. Miller, “Bootstrap-based improvements
for inference with clustered errors,” The Review of Economics and
Statistics 90, no. 3 (2008): 414–427.
"""
from statsmodels.compat.python import range
import pandas as pd
import numpy as np
from statsmodels.tools.grouputils import Group
from statsmodels.stats.moment_helpers import se_cov
__all__ = ['cov_cluster', 'cov_cluster_2groups', 'cov_hac', 'cov_nw_panel',
'cov_white_simple',
'cov_hc0', 'cov_hc1', 'cov_hc2', 'cov_hc3',
'se_cov', 'weights_bartlett', 'weights_uniform']
#----------- from linear_model.RegressionResults
'''
HC0_se
White's (1980) heteroskedasticity robust standard errors.
    Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)) X (X.T X)^(-1)))
where e_i = resid[i]
HC0_se is a property. It is not evaluated until it is called.
When it is called the RegressionResults instance will then have
another attribute cov_HC0, which is the full heteroskedasticity
consistent covariance matrix and also `het_scale`, which is in
this case just resid**2. HCCM matrices are only appropriate for OLS.
HC1_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
    Defined as sqrt(diag(n/(n-p)*HC_0))
HC1_se is a property. It is not evaluated until it is called.
When it is called the RegressionResults instance will then have
another attribute cov_HC1, which is the full HCCM and also `het_scale`,
which is in this case n/(n-p)*resid**2. HCCM matrices are only
appropriate for OLS.
HC2_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
    Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)) X (X.T X)^(-1)))
where h_ii = x_i(X.T X)^(-1)x_i.T
HC2_se is a property. It is not evaluated until it is called.
When it is called the RegressionResults instance will then have
another attribute cov_HC2, which is the full HCCM and also `het_scale`,
which is in this case is resid^(2)/(1-h_ii). HCCM matrices are only
appropriate for OLS.
HC3_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
    Defined as sqrt(diag((X.T X)^(-1) X.T diag(e_i^(2)/(1-h_ii)^(2)) X (X.T X)^(-1)))
where h_ii = x_i(X.T X)^(-1)x_i.T
HC3_se is a property. It is not evaluated until it is called.
When it is called the RegressionResults instance will then have
another attribute cov_HC3, which is the full HCCM and also `het_scale`,
which is in this case is resid^(2)/(1-h_ii)^(2). HCCM matrices are
only appropriate for OLS.
'''
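# Sketch of the HC scale vectors documented above (illustrative only; resid is
# the (nobs,) residual vector, h the diagonal of the hat matrix, n = nobs and
# p the number of estimated parameters):
#
#     scale_hc0 = resid**2
#     scale_hc1 = n / (n - p) * resid**2
#     scale_hc2 = resid**2 / (1 - h)
#     scale_hc3 = (resid / (1 - h))**2
#
# Each scale is plugged into the pinv(x) * diag(scale) * pinv(x).T sandwich
# computed by _HCCM below.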
def _HCCM(results, scale):
'''
sandwich with pinv(x) * diag(scale) * pinv(x).T
where pinv(x) = (X'X)^(-1) X
and scale is (nobs,)
'''
H = np.dot(results.model.pinv_wexog,
scale[:,None]*results.model.pinv_wexog.T)
return H
def cov_hc0(results):
"""
See statsmodels.RegressionResults
"""
het_scale = results.resid**2 # or whitened residuals? only OLS?
cov_hc0 = _HCCM(results, het_scale)
return cov_hc0
def cov_hc1(results):
"""
See statsmodels.RegressionResults
"""
het_scale = results.nobs/(results.df_resid)*(results.resid**2)
cov_hc1 = _HCCM(results, het_scale)
return cov_hc1
def cov_hc2(results):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(np.dot(results.model.exog,
np.dot(results.normalized_cov_params,
results.model.exog.T)))
het_scale = results.resid**2/(1-h)
cov_hc2_ = _HCCM(results, het_scale)
return cov_hc2_
def cov_hc3(results):
"""
See statsmodels.RegressionResults
"""
# above probably could be optimized to only calc the diag
h = np.diag(np.dot(results.model.exog,
np.dot(results.normalized_cov_params,
results.model.exog.T)))
het_scale=(results.resid/(1-h))**2
cov_hc3_ = _HCCM(results, het_scale)
return cov_hc3_
#---------------------------------------
def _get_sandwich_arrays(results):
"""Helper function to get scores from results
Parameters
"""
if isinstance(results, tuple):
# assume we have jac and hessian_inv
jac, hessian_inv = results
jac = np.asarray(jac)
hessian_inv = np.asarray(hessian_inv)
elif hasattr(results, 'model'):
if hasattr(results, '_results'):
# remove wrapper
results = results._results
# assume we have a results instance
if hasattr(results.model, 'jac'):
xu = results.model.jac(results.params)
hessian_inv = np.linalg.inv(results.model.hessian(results.params))
elif hasattr(results.model, 'score_obs'):
xu = results.model.score_obs(results.params)
hessian_inv = np.linalg.inv(results.model.hessian(results.params))
else:
xu = results.model.wexog * results.wresid[:, None]
hessian_inv = np.asarray(results.normalized_cov_params)
else:
raise ValueError('need either tuple of (jac, hessian_inv) or results' +
'instance')
return xu, hessian_inv
def _HCCM1(results, scale):
'''
sandwich with pinv(x) * scale * pinv(x).T
where pinv(x) = (X'X)^(-1) X
and scale is (nobs, nobs), or (nobs,) with diagonal matrix diag(scale)
Parameters
----------
results : result instance
need to contain regression results, uses results.model.pinv_wexog
scale : ndarray (nobs,) or (nobs, nobs)
scale matrix, treated as diagonal matrix if scale is one-dimensional
Returns
-------
H : ndarray (k_vars, k_vars)
robust covariance matrix for the parameter estimates
'''
if scale.ndim == 1:
H = np.dot(results.model.pinv_wexog,
scale[:,None]*results.model.pinv_wexog.T)
else:
H = np.dot(results.model.pinv_wexog,
np.dot(scale, results.model.pinv_wexog.T))
return H
def _HCCM2(hessian_inv, scale):
'''
sandwich with (X'X)^(-1) * scale * (X'X)^(-1)
scale is (kvars, kvars)
this uses results.normalized_cov_params for (X'X)^(-1)
Parameters
----------
    hessian_inv : ndarray (k_vars, k_vars)
        bread of the sandwich, e.g. results.normalized_cov_params or an inverse Hessian
scale : ndarray (k_vars, k_vars)
scale matrix
Returns
-------
H : ndarray (k_vars, k_vars)
robust covariance matrix for the parameter estimates
'''
if scale.ndim == 1:
scale = scale[:,None]
xxi = hessian_inv
H = np.dot(np.dot(xxi, scale), xxi.T)
return H
#TODO: other kernels, move ?
def weights_bartlett(nlags):
'''Bartlett weights for HAC
this will be moved to another module
Parameters
----------
nlags : int
highest lag in the kernel window, this does not include the zero lag
Returns
-------
kernel : ndarray, (nlags+1,)
weights for Bartlett kernel
'''
#with lag zero
return 1 - np.arange(nlags+1)/(nlags+1.)
def weights_uniform(nlags):
'''uniform weights for HAC
this will be moved to another module
Parameters
----------
nlags : int
highest lag in the kernel window, this does not include the zero lag
Returns
-------
kernel : ndarray, (nlags+1,)
weights for uniform kernel
'''
#with lag zero
    return np.ones(nlags + 1)
def S_hac_simple(x, nlags=None, weights_func=weights_bartlett):
'''inner covariance matrix for HAC (Newey, West) sandwich
assumes we have a single time series with zero axis consecutive, equal
spaced time periods
Parameters
----------
x : ndarray (nobs,) or (nobs, k_var)
data, for HAC this is array of x_i * u_i
nlags : int or None
highest lag to include in kernel window. If None, then
nlags = floor(4(T/100)^(2/9)) is used.
weights_func : callable
weights_func is called with nlags as argument to get the kernel
weights. default are Bartlett weights
Returns
-------
S : ndarray, (k_vars, k_vars)
inner covariance matrix for sandwich
Notes
-----
used by cov_hac_simple
options might change when other kernels besides Bartlett are available.
'''
if x.ndim == 1:
x = x[:,None]
n_periods = x.shape[0]
if nlags is None:
nlags = int(np.floor(4 * (n_periods / 100.)**(2./9.)))
weights = weights_func(nlags)
S = weights[0] * np.dot(x.T, x) #weights[0] just for completeness, is 1
for lag in range(1, nlags+1):
s = np.dot(x[lag:].T, x[:-lag])
S += weights[lag] * (s + s.T)
return S
def S_white_simple(x):
'''inner covariance matrix for White heteroscedastistity sandwich
Parameters
----------
x : ndarray (nobs,) or (nobs, k_var)
data, for HAC this is array of x_i * u_i
Returns
-------
S : ndarray, (k_vars, k_vars)
inner covariance matrix for sandwich
Notes
-----
this is just dot(X.T, X)
'''
if x.ndim == 1:
x = x[:,None]
return np.dot(x.T, x)
def group_sums(x, group):
'''sum x for each group, simple bincount version, again
group : array, integer
assumed to be consecutive integers
no dtype checking because I want to raise in that case
uses loop over columns of x
#TODO: remove this, already copied to tools/grouputils
'''
#TODO: transpose return in group_sum, need test coverage first
# re-label groups or bincount takes too much memory
if np.max(group) > 2 * x.shape[0]:
group = pd.factorize(group)[0]
return np.array([np.bincount(group, weights=x[:, col])
for col in range(x.shape[1])])
def S_hac_groupsum(x, time, nlags=None, weights_func=weights_bartlett):
'''inner covariance matrix for HAC over group sums sandwich
This assumes we have complete equal spaced time periods.
The number of time periods per group need not be the same, but we need
at least one observation for each time period
For a single categorical group only, or a everything else but time
dimension. This first aggregates x over groups for each time period, then
applies HAC on the sum per period.
Parameters
----------
x : ndarray (nobs,) or (nobs, k_var)
data, for HAC this is array of x_i * u_i
time : ndarray, (nobs,)
timeindes, assumed to be integers range(n_periods)
nlags : int or None
highest lag to include in kernel window. If None, then
nlags = floor[4(T/100)^(2/9)] is used.
weights_func : callable
weights_func is called with nlags as argument to get the kernel
weights. default are Bartlett weights
Returns
-------
S : ndarray, (k_vars, k_vars)
inner covariance matrix for sandwich
References
----------
Daniel Hoechle, xtscc paper
Driscoll and Kraay
'''
#needs groupsums
x_group_sums = group_sums(x, time).T #TODO: transpose return in grou_sum
return S_hac_simple(x_group_sums, nlags=nlags, weights_func=weights_func)
def S_crosssection(x, group):
'''inner covariance matrix for White on group sums sandwich
I guess for a single categorical group only,
categorical group, can also be the product/intersection of groups
This is used by cov_cluster and indirectly verified
'''
x_group_sums = group_sums(x, group).T #TODO: why transposed
return S_white_simple(x_group_sums)
def cov_crosssection_0(results, group):
'''this one is still wrong, use cov_cluster instead'''
#TODO: currently used version of groupsums requires 2d resid
scale = S_crosssection(results.resid[:,None], group)
scale = np.squeeze(scale)
cov = _HCCM1(results, scale)
return cov
def cov_cluster(results, group, use_correction=True):
'''cluster robust covariance matrix
Calculates sandwich covariance matrix for a single cluster, i.e. grouped
variables.
Parameters
----------
results : result instance
result of a regression, uses results.model.exog and results.resid
TODO: this should use wexog instead
use_correction : bool
If true (default), then the small sample correction factor is used.
Returns
-------
cov : ndarray, (k_vars, k_vars)
cluster robust covariance matrix for parameter estimates
Notes
-----
same result as Stata in UCLA example and same as Peterson
'''
#TODO: currently used version of groupsums requires 2d resid
xu, hessian_inv = _get_sandwich_arrays(results)
if not hasattr(group, 'dtype') or group.dtype != np.dtype('int'):
clusters, group = np.unique(group, return_inverse=True)
else:
clusters = np.unique(group)
scale = S_crosssection(xu, group)
nobs, k_params = xu.shape
n_groups = len(clusters) #replace with stored group attributes if available
cov_c = _HCCM2(hessian_inv, scale)
if use_correction:
cov_c *= (n_groups / (n_groups - 1.) *
((nobs-1.) / float(nobs - k_params)))
return cov_c
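# Usage sketch for cov_cluster (illustrative only; y, X and firm_ids are assumed
# arrays, with firm_ids holding one cluster label per observation):
#
#     import statsmodels.api as sm
#     res = sm.OLS(y, sm.add_constant(X)).fit()
#     cov = cov_cluster(res, firm_ids)
#     bse_cluster = se_cov(cov)      # cluster-robust standard errors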
def cov_cluster_2groups(results, group, group2=None, use_correction=True):
'''cluster robust covariance matrix for two groups/clusters
Parameters
----------
results : result instance
result of a regression, uses results.model.exog and results.resid
TODO: this should use wexog instead
use_correction : bool
If true (default), then the small sample correction factor is used.
Returns
-------
cov_both : ndarray, (k_vars, k_vars)
cluster robust covariance matrix for parameter estimates, for both
clusters
cov_0 : ndarray, (k_vars, k_vars)
cluster robust covariance matrix for parameter estimates for first
cluster
cov_1 : ndarray, (k_vars, k_vars)
cluster robust covariance matrix for parameter estimates for second
cluster
Notes
-----
verified against Peterson's table, (4 decimal print precision)
'''
if group2 is None:
if group.ndim !=2 or group.shape[1] != 2:
raise ValueError('if group2 is not given, then groups needs to be ' +
'an array with two columns')
group0 = group[:, 0]
group1 = group[:, 1]
else:
group0 = group
group1 = group2
group = (group0, group1)
cov0 = cov_cluster(results, group0, use_correction=use_correction)
#[0] because we get still also returns bse
cov1 = cov_cluster(results, group1, use_correction=use_correction)
group_intersection = Group(group)
#cov of cluster formed by intersection of two groups
cov01 = cov_cluster(results,
group_intersection.group_int,
use_correction=use_correction)
#robust cov matrix for union of groups
cov_both = cov0 + cov1 - cov01
#return all three (for now?)
return cov_both, cov0, cov1
def cov_white_simple(results, use_correction=True):
'''
heteroscedasticity robust covariance matrix (White)
Parameters
----------
results : result instance
result of a regression, uses results.model.exog and results.resid
TODO: this should use wexog instead
Returns
-------
cov : ndarray, (k_vars, k_vars)
heteroscedasticity robust covariance matrix for parameter estimates
Notes
-----
This produces the same result as cov_hc0, and does not include any small
sample correction.
verified (against LinearRegressionResults and Peterson)
See Also
--------
cov_hc1, cov_hc2, cov_hc3 : heteroscedasticity robust covariance matrices
with small sample corrections
'''
xu, hessian_inv = _get_sandwich_arrays(results)
sigma = S_white_simple(xu)
cov_w = _HCCM2(hessian_inv, sigma) #add bread to sandwich
if use_correction:
nobs, k_params = xu.shape
cov_w *= nobs / float(nobs - k_params)
return cov_w
def cov_hac_simple(results, nlags=None, weights_func=weights_bartlett,
use_correction=True):
'''
heteroscedasticity and autocorrelation robust covariance matrix (Newey-West)
Assumes we have a single time series with zero axis consecutive, equal
spaced time periods
Parameters
----------
results : result instance
result of a regression, uses results.model.exog and results.resid
TODO: this should use wexog instead
nlags : int or None
highest lag to include in kernel window. If None, then
nlags = floor[4(T/100)^(2/9)] is used.
weights_func : callable
weights_func is called with nlags as argument to get the kernel
weights. default are Bartlett weights
Returns
-------
cov : ndarray, (k_vars, k_vars)
HAC robust covariance matrix for parameter estimates
Notes
-----
verified only for nlags=0, which is just White
just guessing on correction factor, need reference
options might change when other kernels besides Bartlett are available.
'''
xu, hessian_inv = _get_sandwich_arrays(results)
sigma = S_hac_simple(xu, nlags=nlags, weights_func=weights_func)
cov_hac = _HCCM2(hessian_inv, sigma)
if use_correction:
nobs, k_params = xu.shape
cov_hac *= nobs / float(nobs - k_params)
return cov_hac
cov_hac = cov_hac_simple #alias for users
#---------------------- use time lags corrected for groups
#the following were copied from a different experimental script,
#groupidx is tuple, observations assumed to be stacked by group member and
#sorted by time, equal number of periods is not required, but equal spacing is.
#I think this is pure within group HAC: apply HAC to each group member
#separately
def lagged_groups(x, lag, groupidx):
'''
assumes sorted by time, groupidx is tuple of start and end values
not optimized, just to get a working version, loop over groups
'''
out0 = []
out_lagged = []
for l,u in groupidx:
if l+lag < u: #group is longer than lag
out0.append(x[l+lag:u])
out_lagged.append(x[l:u-lag])
if out0 == []:
raise ValueError('all groups are empty taking lags')
#return out0, out_lagged
return np.vstack(out0), np.vstack(out_lagged)
def S_nw_panel(xw, weights, groupidx):
'''inner covariance matrix for HAC for panel data
no denominator nobs used
no reference for this, just accounting for time indices
'''
nlags = len(weights)-1
S = weights[0] * np.dot(xw.T, xw) #weights just for completeness
for lag in range(1, nlags+1):
xw0, xwlag = lagged_groups(xw, lag, groupidx)
s = np.dot(xw0.T, xwlag)
S += weights[lag] * (s + s.T)
return S
def cov_nw_panel(results, nlags, groupidx, weights_func=weights_bartlett,
use_correction='hac'):
'''Panel HAC robust covariance matrix
Assumes we have a panel of time series with consecutive, equal spaced time
periods. Data is assumed to be in long format with time series of each
individual stacked into one array. Panel can be unbalanced.
Parameters
----------
results : result instance
result of a regression, uses results.model.exog and results.resid
TODO: this should use wexog instead
nlags : int or None
Highest lag to include in kernel window. Currently, no default
because the optimal length will depend on the number of observations
per cross-sectional unit.
groupidx : list of tuple
each tuple should contain the start and end index for an individual.
(groupidx might change in future).
weights_func : callable
weights_func is called with nlags as argument to get the kernel
weights. default are Bartlett weights
use_correction : 'cluster' or 'hac' or False
If False, then no small sample correction is used.
        If 'cluster', then the same correction as in cov_cluster is
        used.
        If 'hac' (default), then the same correction as in single time series,
        cov_hac is used.
Returns
-------
cov : ndarray, (k_vars, k_vars)
HAC robust covariance matrix for parameter estimates
Notes
-----
For nlags=0, this is just White covariance, cov_white.
If kernel is uniform, `weights_uniform`, with nlags equal to the number
of observations per unit in a balance panel, then cov_cluster and
cov_hac_panel are identical.
Tested against STATA `newey` command with same defaults.
Options might change when other kernels besides Bartlett and uniform are
available.
'''
if nlags == 0: #so we can reproduce HC0 White
weights = [1, 0] #to avoid the scalar check in hac_nw
else:
weights = weights_func(nlags)
xu, hessian_inv = _get_sandwich_arrays(results)
S_hac = S_nw_panel(xu, weights, groupidx)
cov_hac = _HCCM2(hessian_inv, S_hac)
if use_correction:
nobs, k_params = xu.shape
if use_correction == 'hac':
cov_hac *= nobs / float(nobs - k_params)
elif use_correction in ['c', 'clu', 'cluster']:
n_groups = len(groupidx)
cov_hac *= n_groups / (n_groups - 1.)
cov_hac *= ((nobs-1.) / float(nobs - k_params))
return cov_hac
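# Sketch of building `groupidx` for cov_nw_panel (illustrative only; assumes the
# observations are stacked by individual and sorted by time, with `ids` a numpy
# array holding the individual label of every observation):
#
#     starts = np.flatnonzero(np.r_[True, ids[1:] != ids[:-1]])
#     ends = np.r_[starts[1:], len(ids)]
#     groupidx = list(zip(starts, ends))          # (start, end) per individual
#     cov = cov_nw_panel(res, nlags=4, groupidx=groupidx)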
def cov_nw_groupsum(results, nlags, time, weights_func=weights_bartlett,
use_correction=0):
'''Driscoll and Kraay Panel robust covariance matrix
Robust covariance matrix for panel data of Driscoll and Kraay.
Assumes we have a panel of time series where the time index is available.
The time index is assumed to represent equal spaced periods. At least one
observation per period is required.
Parameters
----------
results : result instance
result of a regression, uses results.model.exog and results.resid
TODO: this should use wexog instead
nlags : int or None
Highest lag to include in kernel window. Currently, no default
because the optimal length will depend on the number of observations
per cross-sectional unit.
time : ndarray of int
this should contain the coding for the time period of each observation.
        time periods should be integers in range(maxT) where maxT is the number of time periods
weights_func : callable
weights_func is called with nlags as argument to get the kernel
weights. default are Bartlett weights
use_correction : 'cluster' or 'hac' or False
        If False (default), then no small sample correction is used.
        If 'hac', then the same correction as in single time series, cov_hac
        is used.
If 'cluster', then the same correction as in cov_cluster is
used.
Returns
-------
cov : ndarray, (k_vars, k_vars)
HAC robust covariance matrix for parameter estimates
Notes
-----
Tested against STATA xtscc package, which uses no small sample correction
This first averages relevant variables for each time period over all
individuals/groups, and then applies the same kernel weighted averaging
over time as in HAC.
Warning:
In the example with a short panel (few time periods and many individuals)
with mainly across individual variation this estimator did not produce
reasonable results.
Options might change when other kernels besides Bartlett and uniform are
available.
References
----------
Daniel Hoechle, xtscc paper
Driscoll and Kraay
'''
xu, hessian_inv = _get_sandwich_arrays(results)
#S_hac = S_nw_panel(xw, weights, groupidx)
S_hac = S_hac_groupsum(xu, time, nlags=nlags, weights_func=weights_func)
cov_hac = _HCCM2(hessian_inv, S_hac)
if use_correction:
nobs, k_params = xu.shape
if use_correction == 'hac':
cov_hac *= nobs / float(nobs - k_params)
elif use_correction in ['c', 'cluster']:
n_groups = len(np.unique(time))
cov_hac *= n_groups / (n_groups - 1.)
cov_hac *= ((nobs-1.) / float(nobs - k_params))
return cov_hac
| bsd-3-clause |
pedroig/Parkinsons-Disease-Digital-Biomarker | CNN/cnn.py | 1 | 18268 | import functools
import tensorflow as tf
import numpy as np
import time
import pandas as pd
import sys
from datetime import datetime
sys.path.insert(0, '../Features')
import utils
def doublewrap(function):
"""
    A decorator decorator, allowing the decorated decorator to be used without
parentheses if no arguments are provided. All arguments must be optional.
"""
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
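# Usage sketch for define_scope (illustrative only): a decorated method becomes a
# property whose TensorFlow ops are built once, inside a variable scope named
# after the method, and cached for later accesses, e.g.:
#
#     class Model(object):
#         def __init__(self, X):
#             self.X = X
#             self.prediction                    # first access builds the ops
#
#         @define_scope
#         def prediction(self):
#             return tf.layers.dense(self.X, units=1)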
class CNN:
def __init__(self,
foldValNumber,
learning_rate=0.0001,
batch_size=100,
n_epochs=30,
timeSeries='rest',
validateOnOldAgeGroup=True,
useAugmentedData=False,
noOutlierTable=False):
"""
Input:
- foldValNumber: int
Fold index for the validation set. This number also defines the distribution of the
folds in the training and test sets.
- learning_rate: float
real positive number
- batch_size: int
Number of samples per batch. (batch_size >= 1)
- n_epochs: int
Maximum number of epochs. (n_epochs > 4)
- timeSeries: string
'rest' or 'outbound'
- validateOnOldAgeGroup: bool
Whether to select only people older 56 years in the validation set.
- useAugmentedData: bool
Whether to use augmented data in the training set.
- noOutlierTable: bool
Whether to read from tables without possible outliers.
"""
self.foldValNumber = foldValNumber
self.channels_input = 3
self.n_outputs = 2
self.timeSeriesPaddedLength = 4000
self.numberOfFolds = 10
self.batch_size = batch_size
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.timeSeries = timeSeries
self.useAugmentedData = useAugmentedData
self.noOutlierTable = noOutlierTable
self.validateOnOldAgeGroup = validateOnOldAgeGroup
self.generateDirectoriesNames()
self.logits_prediction
self.optimize
self.metrics
self.tensorboard_summaries
self.init_and_save
@define_scope(initializer=tf.contrib.slim.xavier_initializer())
def logits_prediction(self):
"""
Outputs logits tensor, i.e., the unscaled log probabilities for each of the two possible classes.
"""
self.X = tf.placeholder(tf.float32, shape=[None, self.timeSeriesPaddedLength, self.channels_input], name="X")
self.y = tf.placeholder(tf.int32, shape=[None], name="label")
convFilters = [8, 16, 32, 32, 64, 64, 128, 128]
convKernelSizes = [5, 5, 4, 4, 4, 4, 4, 5]
x = self.X
for layerNumber in range(8):
x = tf.layers.conv1d(inputs=x,
filters=convFilters[layerNumber],
kernel_size=convKernelSizes[layerNumber],
strides=1, padding='valid',
data_format='channels_last',
activation=tf.nn.relu,
name="conv{}".format(layerNumber + 1))
x = tf.layers.max_pooling1d(inputs=x, pool_size=2,
strides=2, padding='valid',
data_format='channels_last',
name='pool{}'.format(layerNumber + 1))
flat = tf.reshape(x, shape=[-1, 12 * 128], name="flat")
logits = tf.layers.dense(inputs=flat, units=self.n_outputs, name="logits")
return logits
@define_scope("Train")
def optimize(self):
"""
Outputs the gradient descent operation.
"""
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_prediction, labels=self.y)
self.loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
return optimizer.minimize(self.loss)
@define_scope("Metrics")
def metrics(self):
"""
Outputs grouped update operation for the following metrics:
* Area under the ROC curve;
* Precision;
* Recall.
It's important to highlight that the update operation accumulates the relevant data from the confusion matrix for each metric
in local variables, enabling the estimation of the selected metrics over a stream of data like the mini-batches. For this
reason, it is required to reset the local variables before calling this method if the intention is to make an estimation
in a different dataset.
"""
logits = self.logits_prediction
positiveClass_probability = tf.sigmoid(logits[:, 1] - logits[:, 0])
self.auc, auc_update_op = tf.metrics.auc(labels=self.y, predictions=positiveClass_probability, num_thresholds=10000, curve='ROC')
self.precision, precision_update_op = tf.metrics.precision_at_thresholds(labels=self.y, thresholds=[0.5],
predictions=positiveClass_probability)
self.recall, recall_update_op = tf.metrics.recall_at_thresholds(labels=self.y, thresholds=[0.5],
predictions=positiveClass_probability)
update_ops = tf.group(auc_update_op, precision_update_op, recall_update_op)
return update_ops
@define_scope("Tensorboard")
def tensorboard_summaries(self):
"""
Tensor summaries for exporting information about the model to tensorboard.
"""
self.loss_summary = tf.summary.scalar('Loss', self.loss)
self.auc_summary = {
'Training': tf.summary.scalar('AUC_Training', self.auc),
'Validation': tf.summary.scalar('AUC_Validation', self.auc)
}
self.precision_summary = {
'Training': tf.summary.scalar('Precision_Training', self.precision[0]),
'Validation': tf.summary.scalar('Precision_Validation', self.precision[0])
}
self.recall_summary = {
'Training': tf.summary.scalar('Recall_Training', self.recall[0]),
'Validation': tf.summary.scalar('Recall_Validation', self.recall[0])
}
self.file_writer = tf.summary.FileWriter(self.logdir, tf.get_default_graph())
@define_scope("init_and_save")
def init_and_save(self):
"""
Auxiliar tensorflow nodes:
* Node in the graph that initializes all variables when it is run;
* Saver node to save and restore variables to and from checkpoints.
"""
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
def process_summaries_set(self, setName, epoch):
"""
Saves the metrics from the current epoch in the tensorboard summaries and prints it for the user.
Input:
- setName: string
String to select which summaries to process: 'Training' or 'Validation'.
- epoch: int
Epoch number that corresponds to the horizontal axis when plotting the summaries.
"""
self.file_writer.add_summary(self.auc_summary[setName].eval(), epoch)
self.file_writer.add_summary(self.precision_summary[setName].eval(), epoch)
self.file_writer.add_summary(self.recall_summary[setName].eval(), epoch)
self.printMetrics(setName)
def printMetrics(self, setName):
"""
Input:
- setName: string
Title string to be printed.
"""
print("\t{}".format(setName))
print("\t\tROC AUC:", self.auc.eval())
print("\t\tPrecision:", self.precision.eval()[0])
print("\t\tRecall:", self.recall.eval()[0])
def evaluateMetricsRestored(self, restoreFolderName):
"""
Restores trainable parameters correspondent to the folder name specified and evaluates
the performance of the model in the test set.
Input:
- restoreFolderName: string
Folder name for checkpoint to be restored.
"""
with tf.Session() as sess:
# reset the local variables used for metrics
sess.run(tf.local_variables_initializer())
self.saver.restore(sess, "./checkpoints/{}/model.ckpt".format(restoreFolderName))
sess.run(self.metrics, feed_dict=self.feed_dict_test)
self.printMetrics("Test:")
return self.auc.eval()
def train(self):
"""
Executes the tensorflow graph to train the model while also saving and displaying metrics of the process.
Note: It is important to highlight that the mini-batches are loaded to memory on demand, making it so that only
one is in memory at any given time.
Outputs the epoch (int) in which the AUROC score is maximum in the validation set.
"""
max_auc = -1
epochsSinceLastMax = 0
n_batches = len(self.featuresTableTrain) // self.batch_size
with tf.Session() as sess:
self.init.run()
for epoch in range(self.n_epochs):
# reset the local variables used for metrics
sess.run(tf.local_variables_initializer())
epoch_start_time = time.time()
for batch_index in range(n_batches):
# Building Batch
featuresTableBatch = self.featuresTableTrain[self.featuresTableTrain.index // self.batch_size == batch_index]
feed_dict_batch = self.buildFeedDict(featuresTableBatch)
# Training operation and metrics updates
sess.run([self.optimize, self.metrics], feed_dict=feed_dict_batch)
# Loss function summary
if batch_index % 4 == 0:
summary_str = self.loss_summary.eval(feed_dict=feed_dict_batch)
step = epoch * n_batches + batch_index
self.file_writer.add_summary(summary_str, step)
# Metrics
print("Epoch: {}, Execution time: {} seconds".format(epoch, time.time() - epoch_start_time))
# Metrics on training data
self.process_summaries_set("Training", epoch)
# Validation set metrics for current epoch
sess.run(tf.local_variables_initializer())
sess.run(self.metrics, feed_dict=self.feed_dict_val)
self.process_summaries_set("Validation", epoch)
if epoch > 3:
if max_auc < self.auc.eval():
max_auc = self.auc.eval()
epochsSinceLastMax = 0
savingEpoch = epoch
self.saver.save(sess, self.checkpointdir.format(epoch))
else:
epochsSinceLastMax += 1
if epochsSinceLastMax > 8:
break
self.file_writer.close()
return savingEpoch
def generateDirectoriesNames(self):
"""
Generates names for:
*Checkpoint directory;
*Log directory for tensorboard.
"""
self.now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
self.folderName = "run-{}_{}_learningRate-{}_batchSize-{}-fold{}".format(
self.now,
self.timeSeries,
self.learning_rate,
self.batch_size,
self.foldValNumber)
if self.useAugmentedData:
self.folderName += "_augmented"
if self.noOutlierTable:
self.folderName += "_noOutliers"
self.logdir = "tf_logs/{}/".format(self.folderName)
self.checkpointdir = "./checkpoints/{}".format(self.folderName)
self.checkpointdir += "_savingEpoch{}/model.ckpt"
def readPreprocessTable(self, name):
"""
Input:
- name: string
Table to be loaded: 'train', val' or 'test'.
"""
featuresTable = pd.read_csv("../data/{}_extra_columns.csv".format(name), index_col=0)
# Renaming to use the column name to access a named tuple
for timeSeriesName in ['outbound', 'rest']: # , 'return']:
featuresTable.rename(columns={'deviceMotion_walking_{}.json.items'.format(timeSeriesName):
'deviceMotion_walking_' + timeSeriesName},
inplace=True)
featuresTable.reset_index(inplace=True, drop=True)
return featuresTable
def buildFeedDict(self, table):
"""
Receives table and builds the correspondent feed dictionary to be used
in the tensorflow session.
Input:
- table: pandas DataFrame
"""
X, y = self.generateSetFromTable(table)
feed_dict = {
self.y: y,
self.X: X
}
return feed_dict
def loadFoldTables(self):
"""
Loads tables for all the folds used in the cross-validation.
"""
folds = {}
for foldIndex in range(self.numberOfFolds):
table = 'fold{}'.format(foldIndex)
if self.noOutlierTable:
table += '_noOutliers'
if self.useAugmentedData:
table += '_augmented'
folds[foldIndex] = self.readPreprocessTable(table)
return folds
def evaluateFoldConfiguration(self):
"""
The number of folds is equal to the number of distributions of Training and Validation/Test
sets. This function trains the model in one possible distribution, maximizes the AUROC on
the validation set and outputs the AUROC for the test set using the trainable parameters
from the validation maximum.
"""
foldTestNumber = (self.foldValNumber + 1) % self.numberOfFolds
folds = self.loadFoldTables()
featuresTableVal = folds[self.foldValNumber]
featuresTableTest = folds[foldTestNumber]
if self.validateOnOldAgeGroup:
featuresTableVal = featuresTableVal[featuresTableVal.age > 56]
featuresTableTest = featuresTableTest[featuresTableTest.age > 56]
# Removing augmented data from validation and test sets
if self.useAugmentedData:
augmentedRowsVal = featuresTableVal[featuresTableVal.augmented].index
augmentedRowsTest = featuresTableTest[featuresTableTest.augmented].index
featuresTableVal.drop(augmentedRowsVal, inplace=True)
featuresTableTest.drop(augmentedRowsTest, inplace=True)
del folds[self.foldValNumber]
del folds[foldTestNumber]
self.feed_dict_val = self.buildFeedDict(featuresTableVal.sample(frac=1))
self.feed_dict_test = self.buildFeedDict(featuresTableTest.sample(frac=1))
self.featuresTableTrain = pd.concat(folds.values())
self.featuresTableTrain = self.featuresTableTrain.sample(frac=1)
self.featuresTableTrain.reset_index(inplace=True, drop=True)
savingEpoch = self.train()
return self.evaluateMetricsRestored("{}_savingEpoch{}".format(self.folderName, savingEpoch))
def generateSetFromTable(self, featuresTable):
"""
Loads all the rotation rate JSON files from a given table into memory.
"""
axes = ['x', 'y', 'z']
y = featuresTable.Target
y = np.array(y)
X = {}
timeSeriesName = 'deviceMotion_walking_' + self.timeSeries
X = pd.DataFrame(columns=axes)
for row in featuresTable.itertuples():
if "augmented" in featuresTable and row.augmented:
data = utils.readJSON_data(getattr(row, timeSeriesName), timeSeriesName, "RotRate_augmented.json")
else:
data = utils.readJSON_data(getattr(row, timeSeriesName), timeSeriesName, "RotRate.json")
XElement = data.loc[:, axes]
zeros = pd.DataFrame(0, index=np.arange(self.timeSeriesPaddedLength - len(data)), columns=axes)
X = pd.concat([X, XElement, zeros])
X = np.asarray(X)
X = X.reshape((-1, self.timeSeriesPaddedLength, self.channels_input))
return X, y
def main():
tf.reset_default_graph()
foldValNumber = int(sys.argv[1])
print("Running foldValNumber", foldValNumber)
model = CNN(foldValNumber,
learning_rate=0.0001,
batch_size=100,
n_epochs=30,
timeSeries='rest',
useAugmentedData=True,
noOutlierTable=True)
test_auroc = model.evaluateFoldConfiguration()
outFile = open('Folds/fold{}.txt'.format(foldValNumber), 'w')
outFile.write(str(test_auroc))
outFile.close()
if __name__ == '__main__':
main()
| mit |
DANA-Laboratory/CoolProp | wrappers/Python/CoolProp/Plots/PsychScript.py | 3 | 1960 |
# This file was auto-generated by the PsychChart.py script in wrappers/Python/CoolProp/Plots
if __name__=='__main__':
import numpy, matplotlib
from CoolProp.HumidAirProp import HAPropsSI
from CoolProp.Plots.Plots import InlineLabel
p = 101325
Tdb = numpy.linspace(-10,60,100)+273.15
# Make the figure and the axes
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
# Saturation line
w = [HAPropsSI('W','T',T,'P',p,'R',1.0) for T in Tdb]
ax.plot(Tdb-273.15,w,lw=2)
# Humidity lines
RHValues = [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for RH in RHValues:
w = [HAPropsSI('W','T',T,'P',p,'R',RH) for T in Tdb]
ax.plot(Tdb-273.15,w,'r',lw=1)
    # Constant-enthalpy lines
for H in [-20000, -10000, 0, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000]:
#Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAPropsSI('T','H',H,'P',p,'R',1.0)-273.15
T0 = HAPropsSI('T','H',H,'P',p,'R',0.0)-273.15
w1 = HAPropsSI('W','H',H,'P',p,'R',1.0)
w0 = HAPropsSI('W','H',H,'P',p,'R',0.0)
ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r',lw=1)
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")
xv = Tdb #[K]
for RH in [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
yv = [HAPropsSI('W','T',T,'P',p,'R',RH) for T in Tdb]
y = HAPropsSI('W','P',p,'H',65000.000000,'R',RH)
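        # the label for each relative-humidity curve is anchored where the curve
        # meets the H = 65 kJ/kg enthalpy level requested just above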
T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
string = r'$\phi$='+'{s:0.0f}'.format(s=RH*100)+'%'
bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
matplotlib.pyplot.show()
| mit |
glneo/gnuradio | gr-filter/examples/resampler.py | 58 | 4454 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
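        # with the defaults used in main() below (fs_in=8 kHz, fs_out=20 kHz)
        # this resampling ratio works out to 2.5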
print "Resampling from %f to %f by %f " %(fs_in, fs_out, rerate)
# Creating our own taps
taps = filter.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = analog.sig_source_c(fs_in, analog.GR_SIN_WAVE, fc, 1)
#self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = filter.pfb.arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = filter.pfb.arb_resampler_ccf(rerate)
self.snk_in = blocks.vector_sink_c()
self.snk_0 = blocks.vector_sink_c()
self.snk_1 = blocks.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pylab.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp1.set_xlim([-fs_in/2, fs_in/2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
sp2.set_xlim([-fs_out/2, fs_out/2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0/fs_in
Ts_out = 1.0/fs_out
t_in = scipy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = scipy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pylab.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
r = float(fs_out)/float(fs_in)
    sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
itaiin/arrow | python/pyarrow/tests/test_scalars.py | 1 | 7485 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
from pyarrow.compat import unittest, u, unicode_type
import pyarrow as pa
class TestScalars(unittest.TestCase):
def test_null_singleton(self):
with pytest.raises(Exception):
pa.NAType()
def test_nulls(self):
arr = pa.array([None, None])
for v in arr:
assert v is pa.NA
assert v.as_py() is None
def test_bool(self):
arr = pa.array([True, None, False, None])
v = arr[0]
assert isinstance(v, pa.BooleanValue)
assert repr(v) == "True"
assert str(v) == "True"
assert v.as_py() is True
assert arr[1] is pa.NA
def test_int64(self):
arr = pa.array([1, 2, None])
v = arr[0]
assert isinstance(v, pa.Int64Value)
assert repr(v) == "1"
assert str(v) == "1"
assert v.as_py() == 1
assert v == 1
assert arr[2] is pa.NA
def test_double(self):
arr = pa.array([1.5, None, 3])
v = arr[0]
assert isinstance(v, pa.DoubleValue)
assert repr(v) == "1.5"
assert str(v) == "1.5"
assert v.as_py() == 1.5
assert v == 1.5
assert arr[1] is pa.NA
v = arr[2]
assert v.as_py() == 3.0
def test_half_float(self):
arr = pa.array([np.float16(1.5), None], type=pa.float16())
v = arr[0]
assert isinstance(v, pa.HalfFloatValue)
assert repr(v) == "1.5"
assert str(v) == "1.5"
assert v.as_py() == 1.5
assert v == 1.5
assert arr[1] is pa.NA
def test_string_unicode(self):
arr = pa.array([u'foo', None, u'mañana'])
v = arr[0]
assert isinstance(v, pa.StringValue)
assert v.as_py() == u'foo'
assert repr(v) == repr(u"foo")
assert str(v) == str(u"foo")
assert v == u'foo'
# Assert that newly created values are equal to the previously created
# one.
assert v == arr[0]
assert arr[1] is pa.NA
v = arr[2].as_py()
assert v == u'mañana'
assert isinstance(v, unicode_type)
def test_bytes(self):
arr = pa.array([b'foo', None, u('bar')])
def check_value(v, expected):
assert isinstance(v, pa.BinaryValue)
assert v.as_py() == expected
assert str(v) == str(expected)
assert repr(v) == repr(expected)
assert v == expected
assert v != b'xxxxx'
buf = v.as_buffer()
assert isinstance(buf, pa.Buffer)
assert buf.to_pybytes() == expected
check_value(arr[0], b'foo')
assert arr[1] is pa.NA
check_value(arr[2], b'bar')
def test_fixed_size_bytes(self):
data = [b'foof', None, b'barb']
arr = pa.array(data, type=pa.binary(4))
v = arr[0]
assert isinstance(v, pa.FixedSizeBinaryValue)
assert v.as_py() == b'foof'
assert arr[1] is pa.NA
v = arr[2].as_py()
assert v == b'barb'
assert isinstance(v, bytes)
def test_list(self):
arr = pa.array([['foo', None], None, ['bar'], []])
v = arr[0]
assert len(v) == 2
assert isinstance(v, pa.ListValue)
assert repr(v) == "['foo', None]"
assert v.as_py() == ['foo', None]
assert v[0].as_py() == 'foo'
assert v[1] is pa.NA
assert v[-1] == v[1]
assert v[-2] == v[0]
with pytest.raises(IndexError):
v[-3]
with pytest.raises(IndexError):
v[2]
assert arr[1] is pa.NA
v = arr[3]
assert len(v) == 0
@pytest.mark.pandas
def test_timestamp(self):
import pandas as pd
arr = pd.date_range('2000-01-01 12:34:56', periods=10).values
units = ['ns', 'us', 'ms', 's']
for i, unit in enumerate(units):
dtype = 'datetime64[{0}]'.format(unit)
arrow_arr = pa.Array.from_pandas(arr.astype(dtype))
expected = pd.Timestamp('2000-01-01 12:34:56')
assert arrow_arr[0].as_py() == expected
assert arrow_arr[0].value * 1000**i == expected.value
tz = 'America/New_York'
arrow_type = pa.timestamp(unit, tz=tz)
dtype = 'datetime64[{0}]'.format(unit)
arrow_arr = pa.Array.from_pandas(arr.astype(dtype),
type=arrow_type)
expected = (pd.Timestamp('2000-01-01 12:34:56')
.tz_localize('utc')
.tz_convert(tz))
assert arrow_arr[0].as_py() == expected
assert arrow_arr[0].value * 1000**i == expected.value
@pytest.mark.pandas
def test_dictionary(self):
import pandas as pd
colors = ['red', 'green', 'blue']
colors_dict = {'red': 0, 'green': 1, 'blue': 2}
values = pd.Series(colors * 4)
categorical = pd.Categorical(values, categories=colors)
v = pa.DictionaryArray.from_arrays(categorical.codes,
categorical.categories)
for i, c in enumerate(values):
assert v[i].as_py() == c
assert v[i].dictionary_value == c
assert v[i].index_value == colors_dict[c]
def test_int_hash(self):
# ARROW-640
int_arr = pa.array([1, 1, 2, 1])
assert hash(int_arr[0]) == hash(1)
def test_float_hash(self):
# ARROW-640
float_arr = pa.array([1.4, 1.2, 2.5, 1.8])
assert hash(float_arr[0]) == hash(1.4)
def test_string_hash(self):
# ARROW-640
str_arr = pa.array(["foo", "bar"])
assert hash(str_arr[1]) == hash("bar")
def test_bytes_hash(self):
# ARROW-640
byte_arr = pa.array([b'foo', None, b'bar'])
assert hash(byte_arr[2]) == hash(b"bar")
def test_array_to_set(self):
# ARROW-640
arr = pa.array([1, 1, 2, 1])
set_from_array = set(arr)
assert isinstance(set_from_array, set)
assert set_from_array == {1, 2}
def test_struct_value_subscripting(self):
ty = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
assert arr[0]['x'] == 1
assert arr[0]['y'] == 2.5
assert arr[1]['x'] == 3
assert arr[1]['y'] == 4.5
assert arr[2]['x'] == 5
assert arr[2]['y'] == 6.5
with pytest.raises(IndexError):
arr[4]['non-existent']
with pytest.raises(KeyError):
arr[0]['non-existent']
| apache-2.0 |
xuewei4d/scikit-learn | examples/svm/plot_iris_svc.py | 34 | 3772 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
svm.LinearSVC(C=C, max_iter=10000),
svm.SVC(kernel='rbf', gamma=0.7, C=C),
svm.SVC(kernel='poly', degree=3, gamma='auto', C=C))
models = (clf.fit(X, y) for clf in models)
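# note: this is a generator expression, so each classifier is actually fitted
# lazily, one at a time, as the plotting loop below iterates over it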
# title for the plots
titles = ('SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('Sepal length')
ax.set_ylabel('Sepal width')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
| bsd-3-clause |
gtcasl/eiger | eiger/ClusterAnalysis.py | 1 | 5976 | #
# \file ClusterAnalysis.py
# \author Andrew Kerr <[email protected]>
# \date June 27, 2011
#
# \brief given an input matrix and an orthornomal basis, identifies clusters of applications
#
import numpy as np
##
#
# Iterative implementation of Lloyd's Algorithm solving J. MacQueen's formulation of k-means
# see: "Some methods for classification and analysis of multivariate observations"
#
class KMeans:
##
#
def __init__(self, data, k = 10, maxIterations = 50):
"""
Constructs a K-Means cluster analysis pass for an N-by-P matrix, where N is the number of
observations of dimension P
"""
self.means = np.mean(data, axis=0)
data = data - self.means
self.stdevs = np.std(data, axis=0, ddof=1)
self.stdevs[self.stdevs==0.0] = 1.0
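        # features with zero spread would cause a divide-by-zero in the
        # standardization on the next line, so their standard deviation is forced to 1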
self.data = data / self.stdevs
self.k = k
self.N = self.data.shape[0]
self.maxIterations = maxIterations
self._initialize()
##
#
def _initialize(self):
self.clusters = [i % self.k for i in range(0, self.N)]
self.counts = [0 for i in range(0, self.k)]
self.centers = self.data[0:self.k, :]
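        # the initial centers are simply the first k standardized observations;
        # no k-means++-style seeding is attempted here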
##
#
def _evalCenters(self):
self.centers = np.matrix(np.zeros((self.k, self.data.shape[1])))
for i in range(0, self.N):
self.centers[self.clusters[i], :] += self.data[i, :]
for i in range(0, self.k):
self.centers[i, :] /= float(self.counts[i])
##
#
def _assignClusters(self):
newClusters = [0 for i in range(0, self.N)]
newCounts = [0 for i in range(0, self.k)]
converged = True
for i in range(0, self.N):
bestDistance = 0
bestCluster = 0
for c in range(0, self.k):
diff = self.data[i, :] - self.centers[c, :]
distance = np.dot(diff, diff.T)
if not c or distance < bestDistance:
bestCluster = c
bestDistance = distance
if bestCluster != self.clusters[i]:
converged = False
newClusters[i] = bestCluster
newCounts[bestCluster] += 1
for c in range(0, self.k):
if newCounts[c] == 0:
print "Cluster %s has no elements" % (c,)
print " it had %s elements before" % (self.counts[c], )
self.clusters = newClusters
self.counts = newCounts
return converged
##
#
def collect(self):
"""
        Returns a list of clusters, where each cluster is an ascending list of the
        indices of its member elements.
"""
clusters = [[] for i in range(0, self.k)]
for i in range(0, self.N):
clusters[self.clusters[i]].append(i)
return clusters
def plot(self, iteration):
"""
"""
figure()
clusters = self.collect()
color = ['#0000a0', '#00a000', '#a00000', '#00a0f0', '#a0f000', '#f000a0', '#f000f0', '#00f0f0', '#f0f000']
p = 0
for c in clusters:
# print c
xx = [self.data[i, :] for i in c]
plot([x[0, 0] for x in xx], [x[0, 1] for x in xx], 'o', color = color[p])
p += 1
plot([x[0, 0] for x in self.centers], [x[0, 1] for x in self.centers], '+', color='#000000')
title('K = %s' % (iteration,))
# print self.centers
def kmeans(self):
"""
Performs k-means cluster analysis on the input data set return a jagged array of arrays
"""
converged = False
iterations = 0
while not converged:
converged = self._assignClusters()
iterations += 1
if iterations > self.maxIterations:
converged = True
if not converged:
self._evalCenters()
return self.collect()
def closestCluster(self, experiment):
"""
Finds which cluster is closest to each row in experiment
"""
data = (experiment - self.means) / self.stdevs
winners = []
for e in data:
e = np.matrix(e)
mindist = float('inf')
for c,cluster in enumerate(self.centers):
cluster = np.matrix(cluster)
dist = 0.0
for i in range(0,self.data.shape[1]):
dist += (e[0,i] - cluster[0,i])**2
if(dist < mindist):
mindist = dist
winner = c
winners.append(winner)
return winners
###################################################################################################
#
#
if __name__ == "__main__":
# test KMeans demo
from pylab import plot, show, title, figure
data = np.matrix(np.random.rand(32, 2))
experiment = np.matrix(np.random.rand(4,2))
for k in range(4, 9, 2):
maxIterations = 10
kmeans = KMeans(data, k, maxIterations)
res = kmeans.kmeans()
print res
closest = kmeans.closestCluster(experiment)
print "closest"
print closest
kmeans.plot(k)
show()
"""
import scipy.cluster.hierarchy as h
import numpy as np
import scipy.spatial.distance as d
import matplotlib.pyplot as pp
n = 32
thresh = .15
np.random.seed(18237)
data = np.matrix(np.random.rand(n,2))
dist = d.pdist(data,'euclidean')
res = h.linkage(dist)
clust = h.fcluster(res,t=thresh,criterion='distance')
print
for x,y in
fig = pp.figure()
ax = fig.add_subplot(111)
num_clusters = max(clust)
colors = [str(x) for x in clust/float(num_clusters)]
"""
"""
for i in range(0,n,1):
ax.scatter(data[i,0],data[i,1],s=40, c=colors[i])
h.dendrogram(res, color_threshold=thresh)
pp.show()
pass
"""
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/utils/tests/test_utils.py | 1 | 8985 | import warnings
from itertools import chain
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from sklearn.utils import check_random_state
from sklearn.utils import column_or_1d
from sklearn.utils import deprecated
from sklearn.utils import gen_even_slices
from sklearn.utils import resample
from sklearn.utils import safe_indexing
from sklearn.utils import safe_mask
from sklearn.utils import shuffle
from sklearn.utils.arpack import eigsh
from sklearn.utils.extmath import pinvh
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1, 1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes can let any indexable datastructure pass-through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| mit |
femtotrader/arctic-updater | arctic_updater/cli/arctic_updater_cli.py | 1 | 2788 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
$ python arctic_updater/cli/arctic_updater_cli.py --updater truefx --symbol EURUSD --start 20130101 --end 20130201 --source 'ticks'
"""
import logging
logger = logging.getLogger(__name__)
import argparse
import os
import datetime
import pandas as pd
from arctic import Arctic
from arctic_updater import arctic_updater
from arctic_updater.updaters.factory import updater
from arctic_updater.library import update
from arctic_updater.utils import get_session
from arctic_updater.defaults import (MONGO_HOST_DEFAULT, \
SOURCE_DEFAULT, FREQ_DEFAULT, SYMBOLS_DEFAULT, UPDATER_DEFAULT)
def main():
parser = argparse.ArgumentParser(prog="store", description='Store data to DB')
parser.add_argument('--host', help="MongoDB host", default=MONGO_HOST_DEFAULT, type=str)
parser.add_argument('--updater', help="Updater", default=UPDATER_DEFAULT, type=str)
parser.add_argument('-s', '--source', help="Source", default=SOURCE_DEFAULT, type=str)
parser.add_argument('--symbols', help="Symbol", default=SYMBOLS_DEFAULT, type=str)
parser.add_argument('--start', help="Start date", default='', type=str)
parser.add_argument('--end', help="End date", default='', type=str)
parser.add_argument('--freq', help="Freq", default='', type=str)
parser.add_argument('--max_rows', help="max_rows", default=10, type=int)
parser.add_argument('--max_columns', help="max_columns", default=6, type=int)
parser.add_argument('--api_key', help="API key", default='', type=str)
parser.add_argument('--expire_after', help="Cache expiration ('0': no cache, '-1': no expiration, 'HH:MM:SS.X': expiration duration)", default='24:00:00.0', type=str)
args = parser.parse_args()
pd.set_option('max_rows', args.max_rows)
pd.set_option('expand_frame_repr', False)
pd.set_option('max_columns', args.max_columns)
if args.start != '':
start = pd.to_datetime(args.start)
else:
start = None
if args.end != '':
end = pd.to_datetime(args.end)
else:
end = None
if args.freq != '':
freq = args.freq
else:
freq = None
symbols = args.symbols.split(',')
session = get_session(args.expire_after, 'cache')
my_updater = updater(args.updater, session=session)
if args.api_key != '':
my_updater.set_credentials(api_key=args.api_key)
store = Arctic(args.host)
library_name = my_updater.library_name(args.source, freq)
print(library_name)
store.initialize_library(library_name)
library = store[library_name]
for symbol in symbols:
update(library, my_updater, symbol, start, end, freq, args.source.lower())
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main()
| isc |
sbremer/hybrid_rs | results/sampling_from_cf.py | 1 | 1669 | import script_chdir
import numpy as np
import results.plots as lplot
import matplotlib.pyplot as plt
from hybrid_model.dataset import get_dataset
from hybrid_model.index_sampler import IndexSamplerUserItembased as IndexSampler
dataset = get_dataset('ml100k')
(inds_u, inds_i, y, users_features, items_features) = dataset.data
user_dist = np.bincount(inds_u, minlength=dataset.n_users)
item_dist = np.bincount(inds_i, minlength=dataset.n_items)
order_users = np.argsort(-user_dist)
order_items = np.argsort(-item_dist)
dist_users = user_dist[order_users]
dist_items = item_dist[order_items]
inds_u = np.argsort(order_users)[inds_u]
inds_i = np.argsort(order_items)[inds_i]
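# np.argsort(order_*) maps each original user/item id to its popularity rank,
# so from here on index 0 refers to the most-rated user/item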
# Index sampling
sampler_config = {'f_cf': 0.15, 'min_ratings_user': 30, 'f_user': 3.0, 'min_ratings_item': 10, 'f_item': 3.0}
sampler = IndexSampler(dist_users, dist_items, sampler_config, [inds_u, inds_i])
from_cf = sampler.get_indices_from_cf()
from_md = sampler.get_indices_from_md()
from_cf = (from_cf[0].flatten(), from_cf[1].flatten())
from_md = (from_md[0].flatten(), from_md[1].flatten())
fig, ax = lplot.newfig(1.0, 0.7)
ax.scatter(from_cf[0], from_cf[1], s=0.02, marker='s', label='$S_{CF}$', alpha=0.5)
ax.set_title('Index Tuple Sampling from CF')
ax.set_xlabel('Users with\n$\leftarrow$ more - fewer $\\to$\nratings')
ax.set_ylabel('Items with\n$\leftarrow$ more - fewer $\\to$\nratings')
plt.xticks([], [])
plt.yticks([], [])
lgnd = ax.legend(loc="lower center", numpoints=1, fontsize=7)
#change the marker size manually for both lines
for handle in lgnd.legendHandles:
handle._alpha = 1.0
handle.set_sizes([20])
lplot.savefig('sampling_from_cf')
# plt.show()
| apache-2.0 |
flailingsquirrel/cmake_scipy_ctypes_example | gdb-plot/plotter.py | 2 | 1448 | #!/usr/bin/python
__author__="Brian Hone"
import sys, os, string
import matplotlib.pyplot as plot
import mpl_toolkits.mplot3d.axes3d as p3
import numpy as np
import gdb
from gp_data_extractor import *
class Plotter( gdb.Command ):
def __init__( self ):
super( Plotter, self ).__init__("plot", gdb.COMMAND_OBSCURE )
def invoke( self, arg, from_tty ):
args = arg.split()
data = gp_get_data( args )
fig = plot.figure()
ax = fig.add_subplot(111)
ax.grid( True )
for u in data:
if u.dtype.kind == 'c':
ax.plot( np.abs(u) )
else:
ax.plot( u )
leg = ax.legend((args),
'upper right', shadow=False)
leg.get_frame().set_alpha(0.5)
plot.show()
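        # typical use from a gdb session, assuming this script has been sourced
        # and the arguments name variables understood by gp_get_data:
        #   (gdb) plot my_vector other_vector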
# end class Plotter
class PlotThreeD( gdb.Command ):
def __init__( self ):
super( PlotThreeD, self ).__init__("plot3", gdb.COMMAND_OBSCURE )
def invoke( self, arg, from_tty ):
args = arg.split()
data = gp_get_data( args )
fig = plot.figure()
ax = p3.Axes3D( fig )
ax.grid( True )
for u in data:
if u.dtype.kind == 'c':
ax.plot( list(range(len(u))), u.real, u.imag )
leg = ax.legend((args),
'upper right', shadow=False)
leg.get_frame().set_alpha(0.5)
plot.show()
# end class PlotThreeD
Plotter()
PlotThreeD()
| bsd-3-clause |
matthewljones/computingincontext | textmining_blackboxes.py | 1 | 1791 | """computing in context social sciences
data mining utilities
these are fragile and not production code, but here to get you started
to scale them you'll need to start adding exceptions
note also that they are not necessarily using the maximally memory
efficient forms of internal representation"""
import pandas as pd
import matplotlib.pyplot as plt
import os
import sys
import traceback
def icantbelieve(text):
print("I can't believe it's not "+text)
def readtextfiles(our_directory):
"""reads in plain text files and puts them in order in a list"""
current_dir=os.getcwd()
os.chdir(our_directory)
files=[file for file in os.listdir(".") if not file.startswith('.')] #defeat hidden files
files=[file for file in files if not os.path.isdir(file)==True] #defeat directories
articles=[]
for file in files:
with open(file, encoding="ascii", errors="surrogateescape") as plaintext: #ignoring errors insofar as poss
lines=plaintext.readlines()
article=" ".join(lines) #alter lines if want to skip lines
            articles.append(article) #you might want to extract the file name to use; how would you do it?
os.chdir(current_dir)
return articles, files
def data_cleanse(docs_to_clean):
import re
D=len(docs_to_clean)
for d in range(0, D):
docs_to_clean[d] = docs_to_clean[d].lower()
docs_to_clean[d] = re.sub(r'-', ' ', docs_to_clean[d])
docs_to_clean[d] = re.sub(r'[^a-zA-Z0-9 ]', '', docs_to_clean[d])
docs_to_clean[d] = re.sub(r' +', ' ', docs_to_clean[d])
docs_to_clean[d] = re.sub(r'\s\w\s', ' ', docs_to_clean[d]) #eliminate single letters
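        # note: this pattern only removes single characters that have whitespace on
        # both sides, so a stray letter at the very start or end of a document survives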
return docs_to_clean
def plot_mds(positions, names):
xs, ys=positions[:, 0], positions[:, 1]
for x, y, name in zip(xs, ys, names):
plt.scatter(x,y)
plt.text(x,y,name)
plt.show()
| gpl-2.0 |
juliusf/Genetic-SRCPSP | tools/stat_inference/baysean_markov_chain_monte_carlo.py | 1 | 1679 | __author__ = 'jules'
import pymc as mc
from model import _model
import numpy as np
import pylab
import deepThought.ORM.ORM as ORM
from deepThought.util import list_to_ccdf
from numpy import mean
import matplotlib.pyplot as plt
import pylab as pylab
def main():
job = ORM.deserialize("/tmp/output.pickle")
results = sorted(job.tasks.values(), key=lambda x: len(x.execution_history), reverse=True)
set1 = results[0].execution_history
f = open('/tmp/data.table','w')
for data in set1:
f.write(str(data) +'\n') # python will convert \n to os.linesep
    f.close() # you can omit this in most cases as the destructor will call it
test = np.array(set1)
np_arr = np.array(set1)
iqr = np.subtract(*np.percentile(np_arr, [75, 25]))
    # use a float exponent so the cube root is not lost to Python 2 integer division
    bin_width = 2 * iqr * len(set1) ** (-1.0/3) # http://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
print("bin width: %s" % bin_width)
plt.hist(set1, bins=np.arange(min(set1), max(set1) + bin_width, bin_width), normed=True)
    alpha, beta = analyze(test)
    x = np.arange(0, 1000, 0.1)
    # overlay the fitted density; the posterior means returned by analyze() are
    # treated here as the two rates of the hyperexponential, with an assumed
    # mixing weight of 0.5 (the original call referenced undefined lam1, lam2, p)
    s = hyperexp2_pdf(alpha, beta, 0.5, x)
    plt.plot(x, s)
pylab.xlim([0,1000])
pylab.ylim([0,0.0006])
plt.show()
def hyperexp2_pdf(lam1,lam2,p,x):
return p * lam1 * np.exp(-lam1 * x) + (1-p) * lam2 * np.exp(-lam2 * x)
def exponential_pdf(lam, x):
if lam < 0:
return 0
else:
return lam * np.exp(-lam*x)
def analyze(data, discrete=True, xmin=1.):
    model = mc.MCMC(_model(data))
    model.sample(5000)
    alpha = mean(model.trace('alpha')[:])
    beta = mean(model.trace('beta')[:])
    print 'alpha', alpha
    print 'beta', beta
    return alpha, beta
if __name__=='__main__':
main() | mit |
fabioticconi/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 73 | 6451 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
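        # (the inliers are drawn from a standard normal, so the true location is 0
        #  and the true covariance is the identity matrix)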
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
nesterione/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2D data directly (as relative pixel
# position information is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
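# whiten=True rescales each projected component to unit variance, which tends to
# help the RBF-kernel SVM trained on the projections below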
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
jniediek/mne-python | mne/time_frequency/tests/test_tfr.py | 3 | 23690 | import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
import mne
from mne import Epochs, read_events, pick_types, create_info, EpochsArray
from mne.io import read_raw_fif
from mne.utils import (_TempDir, run_tests_if_main, slow_test, requires_h5py,
grand_average)
from mne.time_frequency import single_trial_power
from mne.time_frequency.tfr import (cwt_morlet, morlet, tfr_morlet,
_make_dpss, tfr_multitaper, rescale,
AverageTFR, read_tfrs, write_tfrs,
combine_tfr, cwt, _compute_tfr)
from mne.viz.utils import _fake_click
from itertools import product
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
event_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-eve.fif')
def test_morlet():
"""Test morlet with and without zero mean."""
Wz = morlet(1000, [10], 2., zero_mean=True)
W = morlet(1000, [10], 2., zero_mean=False)
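    # a zero-mean wavelet should carry essentially no DC component, whereas the
    # uncorrected wavelet keeps a small non-zero offset in its real part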
assert_true(np.abs(np.mean(np.real(Wz[0]))) < 1e-5)
assert_true(np.abs(np.mean(np.real(W[0]))) > 1e-3)
def test_time_frequency():
"""Test the to-be-deprecated time-frequency transform (PSD and ITC)."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
events = read_events(event_fname)
include = []
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=include, exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), add_eeg_ref=False)
data = epochs.get_data()
times = epochs.times
nave = len(data)
epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), add_eeg_ref=False)
freqs = np.arange(6, 20, 5) # define frequencies of interest
n_cycles = freqs / 4.
# Test first with a single epoch
power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
# Now compute evoked
evoked = epochs.average()
power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
return_itc=False)
assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True, decim=slice(0, 2))
# Test picks argument and average parameter
assert_raises(ValueError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=True, average=False)
power_picks, itc_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, picks=picks, average=True)
epochs_power_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=False, picks=picks, average=False)
power_picks_avg = epochs_power_picks.average()
# the actual data arrays here are equivalent, too...
assert_array_almost_equal(power.data, power_picks.data)
assert_array_almost_equal(power.data, power_picks_avg.data)
assert_array_almost_equal(itc.data, itc_picks.data)
assert_array_almost_equal(power.data, power_evoked.data)
print(itc) # test repr
print(itc.ch_names) # test property
itc += power # test add
itc -= power # test sub
power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
assert_true('meg' in power)
assert_true('grad' in power)
assert_false('mag' in power)
assert_false('eeg' in power)
assert_equal(power.nave, nave)
assert_equal(itc.nave, nave)
assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
assert_true(power.data.shape == itc.data.shape)
assert_true(power_.data.shape == (len(picks), len(freqs), 2))
assert_true(power_.data.shape == itc_.data.shape)
assert_true(np.sum(itc.data >= 1) == 0)
assert_true(np.sum(itc.data <= 0) == 0)
# grand average
itc2 = itc.copy()
itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop
gave = grand_average([itc2, itc])
assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
itc2.data.shape[1],
itc2.data.shape[2]))
assert_equal(itc2.ch_names[1:], gave.ch_names)
assert_equal(gave.nave, 2)
itc2.drop_channels(itc2.info["bads"])
assert_array_almost_equal(gave.data, itc2.data)
itc2.data = np.ones(itc2.data.shape)
itc.data = np.zeros(itc.data.shape)
itc2.nave = 2
itc.nave = 1
itc.drop_channels([itc.ch_names[0]])
combined_itc = combine_tfr([itc2, itc])
assert_array_almost_equal(combined_itc.data,
np.ones(combined_itc.data.shape) * 2 / 3)
# more tests
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
return_itc=True)
assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
assert_true(power.data.shape == itc.data.shape)
assert_true(np.sum(itc.data >= 1) == 0)
assert_true(np.sum(itc.data <= 0) == 0)
Fs = raw.info['sfreq'] # sampling in Hz
tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
tfr2 = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2,
decim=slice(0, 2))
assert_true(tfr2.shape == (len(picks), len(freqs), 2))
single_power = single_trial_power(data, Fs, freqs, use_fft=False,
n_cycles=2)
single_power2 = single_trial_power(data, Fs, freqs, use_fft=False,
n_cycles=2, decim=slice(0, 2))
single_power3 = single_trial_power(data, Fs, freqs, use_fft=False,
n_cycles=2, decim=slice(1, 3))
single_power4 = single_trial_power(data, Fs, freqs, use_fft=False,
n_cycles=2, decim=slice(2, 4))
assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
assert_array_almost_equal(np.mean(single_power2, axis=0),
power.data[:, :, :2])
assert_array_almost_equal(np.mean(single_power3, axis=0),
power.data[:, :, 1:3])
assert_array_almost_equal(np.mean(single_power4, axis=0),
power.data[:, :, 2:4])
power_pick = power.pick_channels(power.ch_names[:10:2])
assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
power_drop = power.drop_channels(power.ch_names[1:10:2])
assert_equal(power_drop.ch_names, power_pick.ch_names)
assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))
mne.equalize_channels([power_pick, power_drop])
assert_equal(power_pick.ch_names, power_drop.ch_names)
assert_equal(power_pick.data.shape, power_drop.data.shape)
# Test decimation:
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in [2, 3, 8, 9]:
for use_fft in [True, False]:
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
use_fft=use_fft, return_itc=True,
decim=decim)
assert_equal(power.data.shape[2],
np.ceil(float(len(times)) / decim))
freqs = range(50, 55)
decim = 2
_, n_chan, n_time = data.shape
tfr = cwt_morlet(data[0, :, :], sfreq=epochs.info['sfreq'],
freqs=freqs, decim=decim)
assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))
# Test cwt modes
Ws = morlet(512, [10, 20], n_cycles=2)
assert_raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
for use_fft in [True, False]:
for mode in ['same', 'valid', 'full']:
# XXX JRK: full wavelet decomposition needs to be implemented
if (not use_fft) and mode == 'full':
assert_raises(ValueError, cwt, data[0, :, :], Ws,
use_fft=use_fft, mode=mode)
continue
cwt(data[0, :, :], Ws, use_fft=use_fft, mode=mode)
# Test decim parameter checks
assert_raises(TypeError, single_trial_power, data, Fs, freqs,
use_fft=False, n_cycles=2, decim=None)
assert_raises(TypeError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, use_fft=True, return_itc=True,
decim='decim')
def test_dpsswavelet():
"""Test DPSS tapers."""
freqs = np.arange(5, 25, 3)
Ws = _make_dpss(1000, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0,
zero_mean=True)
assert_true(len(Ws) == 3) # 3 tapers expected
# Check that zero mean is true
assert_true(np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)
assert_true(len(Ws[0]) == len(freqs)) # As many wavelets as asked for
@slow_test
def test_tfr_multitaper():
"""Test tfr_multitaper."""
sfreq = 200.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = int(sfreq) # Second long epochs
n_epochs = 3
seed = 42
rng = np.random.RandomState(seed)
noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
t = np.arange(n_times, dtype=np.float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
dat = noise + signal
reject = dict(grad=4000.)
events = np.empty((n_epochs, 3), int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
reject=reject)
freqs = np.arange(35, 70, 5, dtype=np.float)
power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0)
power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0, decim=slice(0, 2))
picks = np.arange(len(ch_names))
power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2.,
time_bandwidth=4.0, picks=picks)
power_epochs = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False)
power_averaged = power_epochs.average()
power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False).average()
print(power_evoked) # test repr for EpochsTFR
assert_raises(ValueError, tfr_multitaper, epochs,
freqs=freqs, n_cycles=freqs / 2.,
return_itc=True, average=False)
# test picks argument
assert_array_almost_equal(power.data, power_picks.data)
assert_array_almost_equal(power.data, power_averaged.data)
assert_array_almost_equal(power.times, power_epochs.times)
assert_array_almost_equal(power.times, power_averaged.times)
assert_equal(power.nave, power_averaged.nave)
assert_equal(power_epochs.data.shape, (3, 2, 7, 200))
assert_array_almost_equal(itc.data, itc_picks.data)
# one is squared magnitude of the average (evoked) and
# the other is average of the squared magnitudes (epochs PSD)
# so values shouldn't match, but shapes should
assert_array_equal(power.data.shape, power_evoked.data.shape)
assert_raises(AssertionError, assert_array_almost_equal,
power.data, power_evoked.data)
tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
assert_true(tmax > 0.3 and tmax < 0.7)
assert_false(np.any(itc.data < 0.))
assert_true(fmax > 40 and fmax < 60)
assert_true(power2.data.shape == (len(picks), len(freqs), 2))
assert_true(power2.data.shape == itc2.data.shape)
# Test decim parameter checks and compatibility between wavelets length
# and instance length in the time dimension.
assert_raises(TypeError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,))
assert_raises(ValueError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=1000, time_bandwidth=4.0)
def test_crop():
"""Test TFR cropping."""
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.crop(0.2, 0.3)
assert_array_equal(tfr.times, [0.2, 0.3])
assert_equal(tfr.data.shape[-1], 2)
@requires_h5py
def test_io():
"""Test TFR IO capacities."""
tempdir = _TempDir()
fname = op.join(tempdir, 'test-tfr.h5')
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.save(fname)
tfr2 = read_tfrs(fname, condition='test')
assert_array_equal(tfr.data, tfr2.data)
assert_array_equal(tfr.times, tfr2.times)
assert_array_equal(tfr.freqs, tfr2.freqs)
assert_equal(tfr.comment, tfr2.comment)
assert_equal(tfr.nave, tfr2.nave)
assert_raises(IOError, tfr.save, fname)
tfr.comment = None
tfr.save(fname, overwrite=True)
assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
tfr.comment = 'test-A'
tfr2.comment = 'test-B'
fname = op.join(tempdir, 'test2-tfr.h5')
write_tfrs(fname, [tfr, tfr2])
tfr3 = read_tfrs(fname, condition='test-A')
assert_equal(tfr.comment, tfr3.comment)
assert_true(isinstance(tfr.info, mne.Info))
tfrs = read_tfrs(fname, condition=None)
assert_equal(len(tfrs), 2)
tfr4 = tfrs[1]
assert_equal(tfr2.comment, tfr4.comment)
assert_raises(ValueError, read_tfrs, fname, condition='nonono')
def test_plot():
"""Test TFR plotting."""
import matplotlib.pyplot as plt
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.plot([1, 2], title='title')
plt.close('all')
ax = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (1, 1))
ax3 = plt.subplot2grid((2, 2), (0, 1))
tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
plt.close('all')
tfr.plot_topo(picks=[1, 2])
plt.close('all')
tfr.plot_topo(picks=[1, 2])
plt.close('all')
fig = tfr.plot(picks=[1], cmap='RdBu_r') # interactive mode on by default
fig.canvas.key_press_event('up')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('down')
cbar = fig.get_axes()[0].CB # Fake dragging with mouse.
ax = cbar.cbar.ax
_fake_click(fig, ax, (0.1, 0.1))
_fake_click(fig, ax, (0.1, 0.2), kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
_fake_click(fig, ax, (0.1, 0.1), button=3)
_fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
plt.close('all')
def test_add_channels():
"""Test tfr splitting / re-appending channel types."""
data = np.zeros((6, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(
['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True)
tfr_meg = tfr.copy().pick_types(meg=True)
tfr_stim = tfr.copy().pick_types(meg=False, stim=True)
tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim])
assert_true(all(ch in tfr_new.ch_names
for ch in tfr_stim.ch_names + tfr_meg.ch_names))
tfr_new = tfr_meg.copy().add_channels([tfr_eeg])
    assert_true(all(ch in tfr_new.ch_names for ch in tfr.ch_names))
assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
assert_true(all(ch not in tfr_new.ch_names
for ch in tfr_stim.ch_names))
# Now test errors
tfr_badsf = tfr_eeg.copy()
tfr_badsf.info['sfreq'] = 3.1415927
tfr_eeg = tfr_eeg.crop(-.1, .1)
assert_raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
assert_raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
assert_raises(ValueError, tfr_meg.add_channels, [tfr_meg])
assert_raises(AssertionError, tfr_meg.add_channels, tfr_badsf)
def test_compute_tfr():
"""Test _compute_tfr function."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname, add_eeg_ref=False)
events = read_events(event_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=[], exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), add_eeg_ref=False)
data = epochs.get_data()
sfreq = epochs.info['sfreq']
freqs = np.arange(10, 20, 3).astype(float)
# Check all combination of options
for method, use_fft, zero_mean, output in product(
('multitaper', 'morlet'), (False, True), (False, True),
('complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc')):
# Check exception
if (method == 'multitaper') and (output == 'phase'):
assert_raises(NotImplementedError, _compute_tfr, data, freqs,
sfreq, method=method, output=output)
continue
# Check runs
out = _compute_tfr(data, freqs, sfreq, method=method,
use_fft=use_fft, zero_mean=zero_mean,
n_cycles=2., output=output)
# Check shapes
shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
if ('avg' in output) or ('itc' in output):
assert_array_equal(shape[1:], out.shape)
else:
assert_array_equal(shape, out.shape)
# Check types
if output in ('complex', 'avg_power_itc'):
assert_equal(np.complex, out.dtype)
else:
assert_equal(np.float, out.dtype)
assert_true(np.all(np.isfinite(out)))
# Check that functions are equivalent to
# i) single_trial_power: X, shape (n_signals, n_chans, n_times)
old_power = single_trial_power(data, sfreq, freqs, n_cycles=2.)
new_power = _compute_tfr(data, freqs, sfreq, n_cycles=2.,
method='morlet', output='power')
assert_array_almost_equal(old_power, new_power)
old_power = single_trial_power(data, sfreq, freqs, n_cycles=2.,
times=epochs.times, baseline=(-.100, 0),
baseline_mode='ratio')
new_power = rescale(new_power, epochs.times, (-.100, 0), 'ratio')
# ii) cwt_morlet: X, shape (n_signals, n_times)
old_complex = cwt_morlet(data[0], sfreq, freqs, n_cycles=2.)
new_complex = _compute_tfr(data[[0]], freqs, sfreq, n_cycles=2.,
method='morlet', output='complex')
assert_array_almost_equal(old_complex, new_complex[0])
# Check errors params
for _data in (None, 'foo', data[0]):
assert_raises(ValueError, _compute_tfr, _data, freqs, sfreq)
for _freqs in (None, 'foo', [[0]]):
assert_raises(ValueError, _compute_tfr, data, _freqs, sfreq)
for _sfreq in (None, 'foo'):
assert_raises(ValueError, _compute_tfr, data, freqs, _sfreq)
for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
for value in (None, 'foo'):
kwargs = {key: value} # FIXME pep8
assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
**kwargs)
# No time_bandwidth param in morlet
assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
method='morlet', time_bandwidth=1)
# No phase in multitaper XXX Check ?
assert_raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
method='multitaper', output='phase')
# Inter-trial coherence tests
out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
assert_true(np.sum(out >= 1) == 0)
assert_true(np.sum(out <= 0) == 0)
# Check decim shapes
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
_decim = slice(None, None, decim) if isinstance(decim, int) else decim
n_time = len(np.arange(data.shape[2])[_decim])
shape = np.r_[data.shape[:2], len(freqs), n_time]
for method in ('multitaper', 'morlet'):
# Single trials
out = _compute_tfr(data, freqs, sfreq, method=method,
decim=decim, n_cycles=2.)
assert_array_equal(shape, out.shape)
# Averages
out = _compute_tfr(data, freqs, sfreq, method=method,
decim=decim, output='avg_power',
n_cycles=2.)
assert_array_equal(shape[1:], out.shape)
run_tests_if_main()
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
andrewnc/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
stevereyes01/pycbc | examples/distributions/mass_examples.py | 14 | 1651 | import matplotlib.pyplot as plt
from pycbc import distributions
# Create a mass distribution object that is uniform between 0.5 and 1.5
# solar masses.
mass1_distribution = distributions.Uniform(mass1=(0.5, 1.5))
# Take 1,000,000 random variable samples from this uniform mass distribution.
mass1_samples = mass1_distribution.rvs(size=1000000)
# Draw another distribution that is Gaussian between 0.5 and 1.5 solar masses
# with a mean of 1.2 solar masses and a standard deviation of 0.15 solar
# masses. Gaussian takes the variance as an input so square the standard
# deviation.
variance = 0.15*0.15
mass2_gaussian = distributions.Gaussian(mass2=(0.5, 1.5), mass2_mean=1.2,
mass2_var=variance)
# Take 1,000,000 random variable samples from this Gaussian mass distribution.
mass2_samples = mass2_gaussian.rvs(size=1000000)
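# As an optional sanity check, the sample statistics of the Gaussian draw
# should land close to the chosen parameters: mass2_samples['mass2'].mean()
# comes out near 1.2 and mass2_samples['mass2'].std() near 0.15, since the
# 0.5-1.5 bounds only trim the far tails.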
# We can also define two distributions together in a single object, instead of one at a time.
two_mass_distributions = distributions.Uniform(mass3=(1.6, 3.0),
mass4=(1.6, 3.0))
two_mass_samples = two_mass_distributions.rvs(size=1000000)
# Choose 50 bins for the histogram subplots.
n_bins = 50
# Plot histograms of samples in subplots
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3, = axes.flat
ax0.hist(mass1_samples['mass1'], bins = n_bins)
ax1.hist(mass2_samples['mass2'], bins = n_bins)
ax2.hist(two_mass_samples['mass3'], bins = n_bins)
ax3.hist(two_mass_samples['mass4'], bins = n_bins)
ax0.set_title('Mass 1 samples')
ax1.set_title('Mass 2 samples')
ax2.set_title('Mass 3 samples')
ax3.set_title('Mass 4 samples')
plt.tight_layout()
plt.show()
| gpl-3.0 |
puruckertom/ubertool | ubertool/trex/tests/trex_process_qaqc.py | 1 | 1374 | from __future__ import division #brings in Python 3.0 mixed type calculation rules
import os
# needs to be run whenever the qaqc csv is updated
csv_path = os.path.join(os.path.dirname(__file__),"trex_qaqc.csv")
csv_in = os.path.join(os.path.dirname(__file__),"trex_qaqc_in_transpose.csv")
csv_exp = os.path.join(os.path.dirname(__file__),"trex_qaqc_exp_transpose.csv")
import pandas as pd
#skiprows 0-indexed (supposedly, but does not seem to be the case)
#skipfooter- number of rows at bottom to skip
try:
pd_obj_inputs = pd.read_csv(csv_path, index_col=0, header=None, skiprows=1, skipfooter=314, engine='python')
pd_obj_inputs = pd_obj_inputs.drop(labels=pd_obj_inputs.columns[range(4)], axis=1)
pd_obj_inputs.index.name = None
pd_obj_inputs.columns -= 5
pd_obj_inputs_transposed = pd_obj_inputs.transpose()
print(pd_obj_inputs_transposed)
pd_obj_inputs_transposed.to_csv(csv_in)
pd_obj_exp_out = pd.read_csv(csv_path, index_col=0, header=None, skiprows=200, engine='python', na_values='')
pd_obj_exp_out = pd_obj_exp_out.drop(labels=pd_obj_exp_out.columns[range(4)], axis=1)
pd_obj_exp_out.index.name = None
pd_obj_exp_out.columns -= 5
pd_obj_exp_out_transposed = pd_obj_exp_out.transpose()
print(pd_obj_exp_out_transposed)
pd_obj_exp_out_transposed.to_csv(csv_exp)
except Exception as e:
    print (e.message)
| unlicense |
jseabold/statsmodels | statsmodels/tools/validation/tests/test_validation.py | 4 | 12589 | from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from statsmodels.tools.validation import (
array_like,
PandasWrapper,
bool_like,
dict_like,
float_like,
int_like,
string_like,
)
from statsmodels.tools.validation.validation import _right_squeeze
@pytest.fixture(params=[True, False])
def use_pandas(request):
return request.param
def gen_data(dim, use_pandas):
if dim == 1:
out = np.empty(10,)
if use_pandas:
out = pd.Series(out)
elif dim == 2:
out = np.empty((20, 10))
if use_pandas:
out = pd.DataFrame(out)
else:
out = np.empty(np.arange(5, 5 + dim))
return out
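# Note: gen_data returns a 1-d or 2-d container (wrapped in a pandas Series or
# DataFrame when use_pandas is True) for dim <= 2, and otherwise a plain
# ndarray of shape (5, 6, ..., 4 + dim).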
class TestArrayLike(object):
def test_1d(self, use_pandas):
data = gen_data(1, use_pandas)
a = array_like(data, "a")
assert a.ndim == 1
assert a.shape == (10,)
assert type(a) is np.ndarray
a = array_like(data, "a", ndim=1)
assert a.ndim == 1
a = array_like(data, "a", shape=(10,))
assert a.shape == (10,)
a = array_like(data, "a", ndim=1, shape=(None,))
assert a.ndim == 1
a = array_like(data, "a", ndim=2, shape=(10, 1))
assert a.ndim == 2
assert a.shape == (10, 1)
with pytest.raises(ValueError, match="a is required to have shape"):
array_like(data, "a", shape=(5,))
def test_2d(self, use_pandas):
data = gen_data(2, use_pandas)
a = array_like(data, "a", ndim=2)
assert a.ndim == 2
assert a.shape == (20, 10)
assert type(a) is np.ndarray
a = array_like(data, "a", ndim=2)
assert a.ndim == 2
a = array_like(data, "a", ndim=2, shape=(20, None))
assert a.shape == (20, 10)
a = array_like(data, "a", ndim=2, shape=(20,))
assert a.shape == (20, 10)
a = array_like(data, "a", ndim=2, shape=(None, 10))
assert a.shape == (20, 10)
a = array_like(data, "a", ndim=2, shape=(None, None))
assert a.ndim == 2
a = array_like(data, "a", ndim=3)
assert a.ndim == 3
assert a.shape == (20, 10, 1)
with pytest.raises(ValueError, match="a is required to have shape"):
array_like(data, "a", ndim=2, shape=(10,))
with pytest.raises(ValueError, match="a is required to have shape"):
array_like(data, "a", ndim=2, shape=(20, 20))
with pytest.raises(ValueError, match="a is required to have shape"):
array_like(data, "a", ndim=2, shape=(None, 20))
match = "a is required to have ndim 1 but has ndim 2"
with pytest.raises(ValueError, match=match):
array_like(data, "a", ndim=1)
match = "a must have ndim <= 1"
with pytest.raises(ValueError, match=match):
array_like(data, "a", maxdim=1)
def test_3d(self):
data = gen_data(3, False)
a = array_like(data, "a", ndim=3)
assert a.shape == (5, 6, 7)
assert a.ndim == 3
assert type(a) is np.ndarray
a = array_like(data, "a", ndim=3, shape=(5, None, 7))
assert a.shape == (5, 6, 7)
a = array_like(data, "a", ndim=3, shape=(None, None, 7))
assert a.shape == (5, 6, 7)
a = array_like(data, "a", ndim=5)
assert a.shape == (5, 6, 7, 1, 1)
with pytest.raises(ValueError, match="a is required to have shape"):
array_like(data, "a", ndim=3, shape=(10,))
with pytest.raises(ValueError, match="a is required to have shape"):
array_like(data, "a", ndim=3, shape=(None, None, 5))
match = "a is required to have ndim 2 but has ndim 3"
with pytest.raises(ValueError, match=match):
array_like(data, "a", ndim=2)
match = "a must have ndim <= 1"
with pytest.raises(ValueError, match=match):
array_like(data, "a", maxdim=1)
match = "a must have ndim <= 2"
with pytest.raises(ValueError, match=match):
array_like(data, "a", maxdim=2)
def test_right_squeeze_and_pad(self):
data = np.empty((2, 1, 2))
a = array_like(data, "a", ndim=3)
assert a.shape == (2, 1, 2)
data = np.empty((2))
a = array_like(data, "a", ndim=3)
assert a.shape == (2, 1, 1)
data = np.empty((2, 1))
a = array_like(data, "a", ndim=3)
assert a.shape == (2, 1, 1)
data = np.empty((2, 1, 1, 1))
a = array_like(data, "a", ndim=3)
assert a.shape == (2, 1, 1)
data = np.empty((2, 1, 1, 2, 1, 1))
with pytest.raises(ValueError):
array_like(data, "a", ndim=3)
def test_contiguous(self):
x = np.arange(10)
y = x[::2]
a = array_like(y, "a", contiguous=True)
assert not y.flags["C_CONTIGUOUS"]
assert a.flags["C_CONTIGUOUS"]
def test_dtype(self):
x = np.arange(10)
a = array_like(x, "a", dtype=np.float32)
assert a.dtype == np.float32
a = array_like(x, "a", dtype=np.uint8)
assert a.dtype == np.uint8
@pytest.mark.xfail(reason="Failing for now")
def test_dot(self, use_pandas):
data = gen_data(2, use_pandas)
a = array_like(data, "a")
assert not isinstance(a.T.dot(data), array_like)
assert not isinstance(a.T.dot(a), array_like)
def test_slice(self, use_pandas):
data = gen_data(2, use_pandas)
a = array_like(data, "a", ndim=2)
assert type(a[1:]) is np.ndarray
def test_right_squeeze():
x = np.empty((10, 1, 10))
y = _right_squeeze(x)
assert y.shape == (10, 1, 10)
x = np.empty((10, 10, 1))
y = _right_squeeze(x)
assert y.shape == (10, 10)
x = np.empty((10, 10, 1, 1, 1, 1, 1))
y = _right_squeeze(x)
assert y.shape == (10, 10)
x = np.empty((10, 1, 10, 1, 1, 1, 1, 1))
y = _right_squeeze(x)
assert y.shape == (10, 1, 10)
def test_wrap_pandas(use_pandas):
a = gen_data(1, use_pandas)
b = gen_data(1, False)
wrapped = PandasWrapper(a).wrap(b)
expected_type = pd.Series if use_pandas else np.ndarray
assert isinstance(wrapped, expected_type)
assert not use_pandas or wrapped.name is None
wrapped = PandasWrapper(a).wrap(b, columns="name")
assert isinstance(wrapped, expected_type)
assert not use_pandas or wrapped.name == "name"
wrapped = PandasWrapper(a).wrap(b, columns=["name"])
assert isinstance(wrapped, expected_type)
assert not use_pandas or wrapped.name == "name"
expected_type = pd.DataFrame if use_pandas else np.ndarray
wrapped = PandasWrapper(a).wrap(b[:, None])
assert isinstance(wrapped, expected_type)
assert not use_pandas or wrapped.columns[0] == 0
wrapped = PandasWrapper(a).wrap(b[:, None], columns=["name"])
assert isinstance(wrapped, expected_type)
assert not use_pandas or wrapped.columns == ["name"]
if use_pandas:
match = "Can only wrap 1 or 2-d array_like"
with pytest.raises(ValueError, match=match):
PandasWrapper(a).wrap(b[:, None, None])
match = "obj must have the same number of elements in axis 0 as"
with pytest.raises(ValueError, match=match):
PandasWrapper(a).wrap(b[: b.shape[0] // 2])
def test_wrap_pandas_append():
a = gen_data(1, True)
a.name = "apple"
b = gen_data(1, False)
wrapped = PandasWrapper(a).wrap(b, append="appended")
expected = "apple_appended"
assert wrapped.name == expected
a = gen_data(2, True)
a.columns = ["apple_" + str(i) for i in range(a.shape[1])]
b = gen_data(2, False)
wrapped = PandasWrapper(a).wrap(b, append="appended")
expected = [c + "_appended" for c in a.columns]
assert list(wrapped.columns) == expected
def test_wrap_pandas_append_non_string():
# GH 6826
a = gen_data(1, True)
a.name = 7
b = gen_data(1, False)
wrapped = PandasWrapper(a).wrap(b, append="appended")
expected = "7_appended"
assert wrapped.name == expected
a = gen_data(2, True)
a.columns = [i for i in range(a.shape[1])]
b = gen_data(2, False)
wrapped = PandasWrapper(a).wrap(b, append="appended")
expected = [f"{c}_appended" for c in a.columns]
assert list(wrapped.columns) == expected
class CustomDict(dict):
pass
@pytest.fixture(params=(dict, OrderedDict, CustomDict, None))
def dict_type(request):
return request.param
def test_optional_dict_like(dict_type):
val = dict_type() if dict_type is not None else dict_type
out = dict_like(val, "value", optional=True)
assert isinstance(out, type(val))
def test_optional_dict_like_error():
match = r"value must be a dict or dict_like \(i.e., a Mapping\)"
with pytest.raises(TypeError, match=match):
dict_like([], "value", optional=True)
with pytest.raises(TypeError, match=match):
dict_like({"a"}, "value", optional=True)
with pytest.raises(TypeError, match=match):
dict_like("a", "value", optional=True)
def test_string():
out = string_like("apple", "value")
assert out == "apple"
out = string_like("apple", "value", options=("apple", "banana", "cherry"))
assert out == "apple"
with pytest.raises(TypeError, match="value must be a string"):
string_like(1, "value")
with pytest.raises(TypeError, match="value must be a string"):
string_like(b"4", "value")
with pytest.raises(
ValueError,
match="value must be one of: 'apple'," " 'banana', 'cherry'",
):
string_like("date", "value", options=("apple", "banana", "cherry"))
def test_optional_string():
out = string_like("apple", "value")
assert out == "apple"
out = string_like("apple", "value", options=("apple", "banana", "cherry"))
assert out == "apple"
out = string_like(None, "value", optional=True)
assert out is None
out = string_like(
None, "value", optional=True, options=("apple", "banana", "cherry")
)
assert out is None
with pytest.raises(TypeError, match="value must be a string"):
string_like(1, "value", optional=True)
with pytest.raises(TypeError, match="value must be a string"):
string_like(b"4", "value", optional=True)
@pytest.fixture(params=(1.0, 1.1, np.float32(1.2), np.array([1.2]), 1.2 + 0j))
def floating(request):
return request.param
@pytest.fixture(params=(np.empty(2), 1.2 + 1j, True, "3.2", None))
def not_floating(request):
return request.param
def test_float_like(floating):
assert isinstance(float_like(floating, "floating"), float)
assert isinstance(float_like(floating, "floating", optional=True), float)
assert float_like(None, "floating", optional=True) is None
if isinstance(floating, (int, np.integer, float, np.inexact)):
assert isinstance(float_like(floating, "floating", strict=True), float)
assert float_like(None, "floating", optional=True, strict=True) is None
def test_not_float_like(not_floating):
with pytest.raises(TypeError):
float_like(not_floating, "floating")
@pytest.fixture(params=(1.0, 2, np.float32(3.0), np.array([4.0])))
def integer(request):
return request.param
@pytest.fixture(
params=(
3.2,
np.float32(3.2),
3 + 2j,
complex(2.3 + 0j),
"apple",
1.0 + 0j,
np.timedelta64(2),
)
)
def not_integer(request):
return request.param
def test_int_like(integer):
assert isinstance(int_like(integer, "integer"), int)
assert isinstance(int_like(integer, "integer", optional=True), int)
assert int_like(None, "floating", optional=True) is None
if isinstance(integer, (int, np.integer)):
assert isinstance(int_like(integer, "integer", strict=True), int)
assert int_like(None, "floating", optional=True, strict=True) is None
def test_not_int_like(not_integer):
with pytest.raises(TypeError):
int_like(not_integer, "integer")
@pytest.fixture(params=[True, False, 1, 1.2, "a", ""])
def boolean(request):
return request.param
def test_bool_like(boolean):
assert isinstance(bool_like(boolean, "boolean"), bool)
assert bool_like(None, "boolean", optional=True) is None
if isinstance(boolean, bool):
assert isinstance(bool_like(boolean, "boolean", strict=True), bool)
else:
with pytest.raises(TypeError):
bool_like(boolean, "boolean", strict=True)
def test_not_bool_like():
with pytest.raises(TypeError):
bool_like(np.array([True, True]), boolean)
| bsd-3-clause |
Lawrence-Liu/scikit-learn | setup.py | 143 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
jcasner/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_mixed.py | 70 | 3776 | from matplotlib._image import frombuffer
from matplotlib.backends.backend_agg import RendererAgg
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, width, height, dpi, vector_renderer, raster_renderer_class=None):
"""
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
assert not vector_renderer.option_image_nocomposite()
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
width, height = self._width * self.dpi, self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = frombuffer(buffer, w, h, True)
image.is_grayscale = False
image.flipud_out()
self._renderer.draw_image(l, height - b - h, image, None)
self._raster_renderer = None
self._rasterizing = False
| agpl-3.0 |
enricopal/snowball_decision | decision_algorithm.py | 1 | 25944 | import numpy as np
import random
import networkx as nx
from operator import itemgetter
import pandas as pd
import sys
import json
import optparse
###############################################################
#### CONCORDANCE, DISCORDANCE AND CREDIBILITY FUNCTIONS ######
###############################################################
def conc_func(i,j,k): #computes the concordance given a pair of alternatives i and j and a given criterion k
x = float(alternatives[i,k] - alternatives[j,k])
q = float(indiff_thresh[k])
p = float(pref_thresh[k])
if (p != q): #check that the angular coeff. exists
if (x < q):
return 1
elif (x < p):
return (-x)/(p-q) + (p)/(p-q)
elif (x >= p):
return 0
else: #otherwise it is a step function
if (x <= p):
return 1
else:
return 0
def disc_func(i,j,k): #computes the discordance given a pair of alternatives i and j and a given criterion k
x = float(alternatives[i,k] - alternatives[j,k])
v = float(vetos[k])
p = float(pref_thresh[k])
if (p!=v):#check that the angular coeff. exists
if (x <= p):
return 0
elif (x <= v):
return (x)/(v-p) - (p)/(v-p)
elif (x > v):
return 1
else: #otherwise it is a step function
if (x <= p):
return 0
else:
return 1
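#Both partial indices above are piecewise linear in the performance difference
#x = g_k(i) - g_k(j): the concordance drops from 1 to 0 between the
#indifference threshold q and the preference threshold p, while the
#discordance rises from 0 to 1 between p and the veto threshold v.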
#define the concordance and discordance functions for comparing an alternative with a reference profile (ELECTRE Tri)
def conc_func_tri(i,j,k): #computes the concordance given an alternative i, a reference profile j and a given criterion k
x = float(alternatives[i,k] - profiles[j,k])
q = float(indiff_thresh[k])
p = float(pref_thresh[k])
if (p != q): #check that the angular coeff. exists
if (x < q):
return 1
elif (x < p):
return (-x)/(p-q) + (p)/(p-q)
elif (x >= p):
return 0
else: #otherwise it is a step function
if (x <= p):
return 1
else:
return 0
def disc_func_tri(i,j,k): #computes the discordance given an alternative i, a reference profile j and a given criterion k
x = float(alternatives[i,k] - profiles[j,k])
v = float(vetos[k])
p = float(pref_thresh[k])
if (p!=v):#check that the angular coeff. exists
if (x <= p):
return 0
elif (x <= v):
return (x)/(v-p) - (p)/(v-p)
elif (x > v):
return 1
else: #otherwise it is a step function
if (x <= p):
return 0
else:
return 1
def concordance_tri(i,j):
c = []
for k in range(m): #for each criterion
c.append(weights[k]*conc_func_tri(i,j,k))
return sum(c)
#define the credibility of the outranking as a function of concordance and discordance
def credibility_tri(i,j):
c = concordance_tri(i,j)
fact = c
for k in range(m):#for each criterion
d = disc_func_tri(i,j,k) #just for simplicity of notation
if (d > c): #if the discordance of the criterion is greater than the overall concordance
fact = fact * (1-d) / (1-c)
return fact
#define the concordance and discordance for a pair of alternatives
def concordance(i,j):
c = []
for k in range(m): #for each criterion
c.append(weights[k]*conc_func(i,j,k))
return sum(c)
#define the credibility of the outranking as a function of concordance and discordance
def credibility(i,j):
c = concordance(i,j)
fact = c
for k in range(m):#for each criterion
d = disc_func(i,j,k) #just for simplicity of notation
if (d > c): #if the discordance of the criterion is greater than the overall concordance
fact = fact * (1-d) / (1-c)
return fact
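#For reference, the credibility computed above follows the standard ELECTRE III
#form (a sketch in the usual notation):
#    sigma(i,j) = C(i,j) * prod over {k : D_k(i,j) > C(i,j)} of (1 - D_k(i,j)) / (1 - C(i,j))
#i.e. the overall concordance is weakened by every criterion whose discordance
#exceeds it, and falls to 0 as soon as some D_k reaches 1.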
def discrimination_thresh(x):#non-constant discrimination threshold s(lambda) = a - b*lambda
return a - b*x
#########################################
############ ALGORITHMS #################
#########################################
#distillation algorithm
def compute_scores_2(cred_matrix,altern_list):
n = len(altern_list)
scores = {} #vector holding the score of each alternative
keys = altern_list
for i in keys: #initialize to 0 the scores
scores[i] = 0
#compute the max credibility
l = max(cred_matrix.values())
alpha = discrimination_thresh(l) #compute the discrimination threshold
for i in altern_list: #for each alternative
for j in altern_list:
if i!=j: #excluding the diagonal elements
if(cred_matrix[(i,j)] >= l - alpha):
scores[i] += 1
if(cred_matrix[(j,i)] >= l - alpha):
scores[i] -= 1
return scores
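#The score computed above is the distillation "qualification" of each
#alternative: the number of alternatives it outranks at the current cut level
#(l - alpha) minus the number of alternatives that outrank it.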
#what happens when there are more than two alternatives
def runoff(cred_matrix,maxima_matrix, maxima):
scores = {}
scores = compute_scores_2(maxima_matrix,maxima) #first step of the algorithm
#check if there is a unique max
maxima_run = []
maximum = max(scores.values())
for i in scores.keys():#create a list with the alternatives that have maximum score
if scores[i] == maximum:
maxima_run.append(i)
if len(maxima_run) == 1: #if there is a unique max
ranking.append(maxima_run[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == maxima_run[0] or j == maxima_run[0]:
del cred_matrix[(i,j)]
altern_list.remove(maxima_run[0])
distillation_2(cred_matrix)
elif len(maxima_run) > 1:#otherwise put them all together with the same ranking
ranking.append(maxima_run)
#eliminate the winning alternatives from the matrix
        if len(cred_matrix) > len(maxima_run):#if there are more alternatives left to rank, remove the ones just ranked
#print cred_matrix
for j in maxima_run:
altern_list.remove(j)
for i,k in cred_matrix.keys():
if i == j or k == j:
del cred_matrix[(i,k)]
#print cred_matrix.values(), maxima_run
distillation_2(cred_matrix)
        else: #otherwise the algorithm stops
return ranking
#initializing the variables
def distillation_2(cred_matrix):
#print cred_matrix
if len(cred_matrix) == 1: #there is just one alternative left, the algorithm has to stop
ranking.append(altern_list[0]) #add the last element
if len(cred_matrix) > 1: #are there any more alternatives to rank?
scores = {}
scores = compute_scores_2(cred_matrix,altern_list) #first step of the algorithm
#check if there is a unique max
maxima = []
#index_maxima = []
nonmaxima = []
#nonmaxima_all = []
#index_nonmaxima = []
maxima_matrix = []
maximum = max(scores.values())
for i in scores.keys():#create a list with the alternatives that have maximum score
if scores[i] == maximum:
maxima.append(i)
else:
nonmaxima.append(i)
if len(maxima) == 1: #if there is a unique max
ranking.append(maxima[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == maxima[0] or j == maxima[0]:
del cred_matrix[(i,j)]
altern_list.remove(maxima[0])
distillation_2(cred_matrix)
if len(maxima) > 1:
            #build the submatrix restricted to the maxima
            #remove the non-maxima from the credibility matrix
maxima_matrix = {}
for i in cred_matrix.keys():
maxima_matrix[i] = cred_matrix[i]
            for k in nonmaxima: #drop all the non-maxima
for i,j in maxima_matrix.keys():
if i == k or j == k:
del maxima_matrix[(i,j)]
#print cred_matrix
#then I apply the runoff to the submatrix of maxima
runoff(cred_matrix,maxima_matrix, maxima)
return ranking
#what happens when there are more than two alternatives
def runoff_asc(cred_matrix,minima_matrix, minima):
scores = {}
scores = compute_scores_2(minima_matrix,minima) #first step of the algorithm
#find the minima
minima_run = []
minimum = min(scores.values())
for i in scores.keys():#create a list with the alternatives that have minimum score
if scores[i] == minimum:
minima_run.append(i)
#check if there is a unique min
if len(minima_run) == 1: #if there is a unique max
ranking.append(minima_run[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == minima_run[0] or j == minima_run[0]:
del cred_matrix[(i,j)]
altern_list.remove(minima_run[0])
distillation_2_asc(cred_matrix)
elif len(minima_run) > 1:#otherwise put them all together with the same ranking
ranking.append(minima_run)
#eliminate the winning alternatives from the matrix
        if len(cred_matrix) > len(minima_run):#if there are more alternatives left to rank, remove the ones just ranked
for j in minima_run:
altern_list.remove(j)
for i,k in cred_matrix.keys():
if i == j or k == j:
del cred_matrix[(i,k)]
distillation_2_asc(cred_matrix)
        else: #otherwise the algorithm stops
return ranking
def distillation_2_asc(cred_matrix):
#there is just one alternative left, the algorithm has to stop
if len(cred_matrix) == 1:
#print cred_matrix
ranking.append(altern_list[0]) #add the last element
#are there any more alternatives to rank?
if len(cred_matrix) > 1:
scores = {}
scores = compute_scores_2(cred_matrix,altern_list) #first step of the algorithm
#find the minima
minima = []
nonminima = []
minima_matrix = []
minimum = min(scores.values())
for i in scores.keys():#create a list with the alternatives that have minimum score
if scores[i] == minimum:
minima.append(i)
else:
nonminima.append(i)
if len(minima) == 1: #if there is a unique max
ranking.append(minima[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == minima[0] or j == minima[0]:
del cred_matrix[(i,j)]
altern_list.remove(minima[0])
distillation_2_asc(cred_matrix)
#if there's more than a minimum
if len(minima) > 1:
            #build the submatrix restricted to the minima
            #remove the non-minima from the credibility matrix
minima_matrix = {}
for i in cred_matrix.keys():
minima_matrix[i] = cred_matrix[i]
            for k in nonminima: #drop all the non-minima
for i,j in minima_matrix.keys():
if i == k or j == k:
del minima_matrix[(i,j)]
#then I apply the runoff to the submatrix of maxima
runoff_asc(cred_matrix,minima_matrix, minima)
return ranking
def ELECTREIII(x):
global alternatives
alternatives = x
#################################
### credibility matrix ##########
#################################
cred_matrix = {} #described by a dictionary taking a tuple (i,j) as key
for i in range(n): #assigning the values to the cred_matrix
for j in range(n):
cred_matrix[(i,j)] = credibility(i,j)
################################
## computing the threshold #####
################################
#compute the max element l of the cred_matrix
l = max(cred_matrix.values())
    #compute the discrimination threshold alpha
alpha = a - b*l
#############################
####### distillation ########
#############################
#calculating discending ranking
global ranking
ranking = []
global altern_list
altern_list = range(n)
disc_order = distillation_2(cred_matrix)
#calculating ascending ranking
ranking = []
altern_list = range(n)
#reinitializing the credibility matrix
cred_matrix = {} #described by a dictionary taking a tuple (i,j) as key
for i in range(n): #assigning the values to the cred_matrix
for j in range(n):
cred_matrix[(i,j)] = credibility(i,j)
'''
asc_order = distillation_2_asc(cred_matrix)
#the asc_order must be reversed
asc_order = asc_order[::-1]
#print disc_order, asc_order
#turning lists into dictionaries
rank_asc = {}
'''
rank_disc = {}
'''
for i in range(len(asc_order)):
if type(asc_order[i]) == list:#means I can iter through it
for j in asc_order[i]:
rank_asc[j] = i
else: #if it is a single number I can make directly the association
rank_asc[asc_order[i]] = i
'''
for i in range(len(disc_order)):
if type(disc_order[i]) == list:
for j in disc_order[i]:
rank_disc[j] = i
else:
rank_disc[disc_order[i]] = i
#######################################
##### combining the rankings ##########
#######################################
adjacency = np.zeros((n,n))
'''
#compare all pair of alternatives
#if i outranks j in one of the two orders and j does not outrank i in the other, i outranks j in the final order
#otherwise, they are incomparable
#N.B. the lower the ranking, the better
for i in range(n):
for j in range(n):
if i != j:
if rank_asc[i] < rank_asc[j] and rank_disc[i] <= rank_disc[j]:
adjacency[i,j] = 1
if rank_disc[i] < rank_disc[j] and rank_asc[i] <= rank_asc[j]:
adjacency[i,j] = 1
#creating the outranking graph
G = nx.DiGraph()
G.add_nodes_from(range(n))
for i in range(n):
for j in range(n):
if adjacency[i,j] == 1:
G.add_edge(i,j)
indegree = nx.in_degree_centrality(G)
rank = {}
for i in G.nodes():
rank[i] = (n-1)*indegree[i]
#print asc_order
#rescaling to an ordinal sequence
#let us count the number of distinct elements in the indegree
count = 1
for i in range(len(rank.values())-1):
if rank.values()[i] != rank.values()[i+1]:
count += 1
'''
#sorted_rank = sorted(rank.iteritems(), key=itemgetter(1)) #list representing the pair of values
sorted_rank = sorted(rank_disc.iteritems(), key=itemgetter(1)) #list representing the pair of values
    #close any gaps so that consecutive ranks in sorted_rank differ by at most 1
sorted_rank = np.array(sorted_rank)
for i in range(len(sorted_rank) - 1):
if sorted_rank[i + 1][1] - sorted_rank[i][1] > 1:
sorted_rank[i + 1][1] = sorted_rank[i][1] + 1
final_rank = {}
for i,j in sorted_rank:
final_rank[i] = j
return sorted_rank
####################################
##### RUN THE ALGORITHM ############
####################################
def decision_ranking(inputs, crit_weights, mitigation_strategies, indiff, pref, veto):
dati = pd.read_json(inputs)
global m
m = len(dati) #number of criteria
#normalizing the weights
global weights
weights = np.array(crit_weights)
total_weight = sum(weights)
if total_weight == 0:
weights = [1./m for i in range(m)]
else:
weights = weights/total_weight
#parameters of the model (vectors)
    #veto thresholds
    #preference thresholds
    #indifference thresholds
global vetos, pref_thresh, indiff_thresh,a,b
vetos = veto
pref_thresh = pref
indiff_thresh = indiff
#threshold parameters
a = 0.3
b = 0.15
length = len(dati.keys()) -1
alternatives = np.array([dati[mitigation_strategies[i]] for i in range(length)])
global n
n = len(alternatives) #number of strategies
N = 101 #number of runs
results = [] #saving the ranking for each run
    for i in range(N): #repeat N times
#original matrix
alternatives = np.array([dati[mitigation_strategies[i]] for i in range(length)])
#random sampled
alternat = np.zeros((n,m))
        #alternat[i,j] is a random draw from a Poisson distribution with mean alternatives[i,j]
for i in range(n):
for j in range(m):
alternat[i,j] = np.random.poisson(alternatives[i,j])
results.append(ELECTREIII(alternat))
#dictionary assigning to each alternative a list of its rankings
ranking_montecarlo = {}
#initializing
for i in range(n):
ranking_montecarlo[i] = []
for i in results:
        for j in i: # each (alternative, rank) pair
k = int(j[0])
l = int(j[1])
ranking_montecarlo[k].append(l)
#now we can compute the median
final_ranking_montecarlo = {}
for i in ranking_montecarlo.keys():
final_ranking_montecarlo[i] = np.median(ranking_montecarlo[i])
#compute the ranking distribution
#occurrences tells us the frequency of ranking r for alternative i
occurrences = np.zeros((n,n))
for i in results:
        for j in i: # each (alternative, rank) pair
k = int(j[0]) #alternative
l = int(j[1]) #rank
            occurrences[k,l] += 1 # every time this (alternative, rank) pair occurs, increment its count
#assign their names to the alternatives
named_final_ranking = {}
for i in final_ranking_montecarlo.keys():
        named_final_ranking[dati.keys()[i+1]] = final_ranking_montecarlo[i] + 1 # assign the names and make the ranking start from 1
#assign the names to the ranking distributions
ranking_distributions = {}
var = 1
for i in occurrences:
ranking_distributions[dati.keys()[var]] = i
var += 1
####################
### OUTPUTS DATA ###
####################
#print "The medians of the ranking distributions are\n"
#print named_final_ranking
#print "\n"
#print "The ranking distributions are: \n"
#print ranking_distributions
return (named_final_ranking, ranking_distributions)
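# Hedged sketch (not part of the original pipeline): the Monte Carlo loop above perturbs the
# performance matrix by drawing each score from a Poisson distribution centred on its nominal
# value, so the median ranking reflects counting-type uncertainty in the inputs.
def _sketch_poisson_resampling(nominal_scores, n_runs=101):
    """Illustrative only: return n_runs Poisson-resampled copies of a score matrix."""
    nominal_scores = np.asarray(nominal_scores, dtype=float)
    return [np.random.poisson(nominal_scores) for _ in range(n_runs)]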
def ELECTRETri(x):
global alternatives
alternatives = x
#################################
###### credibility matrix #######
#################################
cred_matrix = np.zeros((n,M)) #initializing the credibility matrix
for i in range(n): #assigning the values to the cred_matrix
for j in range(M):
cred_matrix[i,j] = credibility_tri(i,j)
#################################
### turn the fuzzy into crisp ###
#################################
for i in range(n):
for j in range(M):
if cred_matrix[i,j] > lambd: #if cred is greater than a threshold
cred_matrix[i,j] = 1
else:
cred_matrix[i,j] = 0
###################################
########## exploration ############
###################################
pessimistic = {}
    # for each alternative, find which reference profiles it outranks
for i in range(n):
pessimistic[i] = []
for j in range(M):
if cred_matrix[i,j] == 1:
pessimistic[i].append(j)
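    # e.g. an alternative whose (crisp) credibility is 1 for profiles 1 and 2 is assigned
    # below to category 1, the better (lower-index) of the profiles it outranks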
    # then pick the best among these
for i in pessimistic.keys():
        pessimistic[i] = min(pessimistic[i]) # best (lowest-index) outranked profile; avoids relying on dict value ordering
    # turn the dictionary into a list sorted by assigned category
pessimistic = sorted(pessimistic.iteritems(), key = itemgetter(1))
return pessimistic
def decision_sorting(inputs, crit_weights,mitigation_strategies, indiff, pref, veto, prof):
dati = pd.read_json(inputs)
global m
m = len(dati) #number of criteria
#normalizing the weights
global weights
weights = np.array(crit_weights)
total_weight = sum(weights)
if total_weight == 0:
weights = [1./m for i in range(m)]
else:
weights = weights/total_weight
#parameters of the model (vectors)
#vetos threshold
#concordance threshold
#discordance threshold
global vetos, pref_thresh, indiff_thresh,lambd
vetos = veto
pref_thresh = pref
indiff_thresh = indiff
length = len(dati.keys())-1
alternatives = np.array([dati[mitigation_strategies[i]] for i in range(length)])
global n
n = len(alternatives) #number of strategies
lambd = 0.75
#alternatives = np.array((dati['Basic building retrofitting'], dati['Enhanced building retrofitting'],dati['Evacuation'],dati['No mitigation']))
#n = len(alternatives)
global profiles
profiles = prof
#profiles = np.array(([5, 5,0,2,1,3,6], [25, 3500000,2500000,7000,180000,80,200],[1000, 2000000000,180000000,2000008,15020000,3000,6000]))
global M
M = len(profiles) #number of classes
N = 101 #number of runs
results = [] #saving the ranking for each run
    for i in range(N): # repeat N times
#original matrix
alternatives = np.array([dati[mitigation_strategies[i]] for i in range(length)])
#random sampled
alternat = np.zeros((n,m))
#alternat[i,j] is the random sampling of a poissonian distribution of average alternatives[i,j]
for i in range(n):
for j in range(m):
alternat[i,j] = np.random.poisson(alternatives[i,j])
results.append(ELECTRETri(alternat))
    #dictionary assigning to each alternative a list of its categories
sorting_montecarlo = {}
#initializing
for i in range(n):
sorting_montecarlo[i] = []
for i in results:
        for j in i: # each (alternative, rank) pair
k = int(j[0])
l = int(j[1])
sorting_montecarlo[k].append(l)
#now we can compute the median
final_sorting_montecarlo = {}
for i in sorting_montecarlo.keys():
final_sorting_montecarlo[i] = np.median(sorting_montecarlo[i])
#we can assign letters instead of numbers
for i in final_sorting_montecarlo.keys():
if final_sorting_montecarlo[i] == 0:
final_sorting_montecarlo[i] = 'A'
elif final_sorting_montecarlo[i] == 1:
final_sorting_montecarlo[i] = 'B'
elif final_sorting_montecarlo[i] == 2:
final_sorting_montecarlo[i] = 'C'
#building the probability distribution
#occurrences tells us the frequency of ranking r for alternative i
occurrences = np.zeros((n,M))
for i in results:
        for j in i: # each (alternative, rank) pair
k = int(j[0]) #alternative
l = int(j[1]) #rank
            occurrences[k,l] += 1 # every time this (alternative, rank) pair occurs, increment its count
#assign their names to the alternatives
named_final_sorting = {}
for i in final_sorting_montecarlo.keys():
        named_final_sorting[dati.keys()[i+1]] = final_sorting_montecarlo[i] # assign the alternative names
#assign the names to the ranking distributions
sorting_distributions = {}
var = 1
for i in occurrences:
sorting_distributions[dati.keys()[var]] = i
var += 1
####################
### OUTPUTS DATA ###
####################
return (named_final_sorting, sorting_distributions)
#a = decision_sorting('santorini/scenario1_input.json',[0.2,0.1,0.3,0.0,0.2,0.1,0.1],['EVC_anteEQ1','EVC_anteEQ1_anteEQ2','No Mitigation'],
#print a[0],a[1]
b = decision_ranking('santorini/scenario1_input.json',[5,3,2,1,2,0,0],['EVC_anteEQ1','EVC_anteEQ1_anteEQ2','No Mitigation'],
np.array([0, 50, 50, 2, 50, 2, 20]), np.array([2, 100, 100, 20, 100, 20, 200]), np.array([5, 5000, 5000, 100, 5000, 100, 2000]))
print b[0],b[1]
#final_sorting, sorting_distribution = decision_sorting('santorini/fhg.json',[0.2 for i in range(8)],['UPS (uninterrupted power supply)','Redundancy within grids','Reinforcement of vulnerable nodes','No Mitigation'],
# np.array([5, 5, 5, 5, 5, 5, 5,5]), np.array([50, 50, 50, 50, 50, 50, 50, 50]), np.array([500, 500, 500, 500, 500, 500, 500, 500]),
# np.array(([30, 25,20,34,30,20,30,20],[50,50,50,50,50,50,50,50],[1000, 20000,18000,2000,5000,5000,6000,5000])))
#print final_sorting
#final_ranking, ranking_distribution = decision_ranking('santorini/fhg.json',[0.2 for i in range(8)],['UPS (uninterrupted power supply)','Redundancy within grids','Reinforcement of vulnerable nodes','No Mitigation'],
# np.array([5, 5, 5, 5, 5, 5, 5,5]), np.array([50, 50, 50, 50, 50, 50, 50, 50]), np.array([500, 500, 500, 500, 500, 500, 500, 500]))
#print final_ranking
| apache-2.0 |
kklmn/xrt | examples/withRaycing/00_xRayCalculator/calc_carbon_contamination.py | 1 | 1036 | # -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev"
__date__ = "14 Mar 2019"
import numpy as np
import matplotlib.pyplot as plt
# path to xrt:
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import xrt.backends.raycing.materials as rm
mSi = rm.Material('Si', rho=2.33)
mAu = rm.Material('Au', rho=19.3)
mC = rm.Material('C', rho=2.26)
mAuCont = rm.Multilayer(tLayer=mC, tThickness=10, # in Å
bLayer=mAu, bThickness=400, substrate=mSi,
nPairs=1, idThickness=2)
E = np.linspace(10, 120, 110)  # photon energies in eV
theta = np.radians(7) * np.ones_like(E)  # grazing angle of 7 degrees
rs, rp = mAu.get_amplitude(E, np.sin(theta))[0:2]
plt.plot(E, abs(rs)**2, label='s-pol Au')
plt.plot(E, abs(rp)**2, label='p-pol Au')
rs, rp = mAuCont.get_amplitude(E, np.sin(theta))[0:2]
plt.plot(E, abs(rs)**2, label='s-pol Au with 4 nm C')
plt.plot(E, abs(rp)**2, label='p-pol Au with 4 nm C')
plt.gca().set_title('Reflectivity of clean and contaminated gold')
plt.gca().legend()
plt.show()
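# Hedged sketch (not part of the original example): scanning the contamination thickness
# with the same xrt objects defined above; the thickness values are illustrative only.
def _sketch_contamination_scan(thicknesses_angstrom=(10, 40, 100)):
    for tC in thicknesses_angstrom:
        ml = rm.Multilayer(tLayer=mC, tThickness=tC,
                           bLayer=mAu, bThickness=400, substrate=mSi,
                           nPairs=1, idThickness=2)
        rs, rp = ml.get_amplitude(E, np.sin(theta))[0:2]
        plt.plot(E, abs(rs)**2, label='s-pol Au with %g A C' % tC)
    plt.legend()
    plt.show()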
| mit |
hsiaoyi0504/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
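    # With the mock installed, a doctest call such as
    # datasets.fetch_mldata('MNIST original', data_home=custom_data_home)
    # should receive the arrays registered above instead of hitting mldata.org
    # (illustrative note; the exact dataset names used by the doctests may differ).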
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
deapplegate/wtgpipeline | SeeingClearly_for_coadds.py | 1 | 18684 | #! /usr/bin/env python
#adam-example# ./SeeingClearly_for_coadds.py /gpfs/slac/kipac/fs1/u/awright/SUBARU//MACS0429-02/W-C-RC/SCIENCE/coadd_MACS0429-02_good/coadd.fits
#adam-does# calculates seeing, has a plotting option to check if it's right or not
#adam-use# anything
#the basics
import commands
import matplotlib
host=commands.getoutput('hostname')
if not host.startswith('ki-ls'):
matplotlib.use('Agg')
from matplotlib.pyplot import *
from numpy import *
from glob import glob
from copy import deepcopy
import scipy
import itertools
#unlikely to need, so comment them out:
#from collections import Counter
#import cosmolopy
#import shutil
import hashlib
import astropy
import astropy.io.fits as pyfits
from astropy.io import ascii
#shell/non-python related stuff
import sys ; sys.path.append('/u/ki/awright/InstallingSoftware/pythons')
import time
import os
import shutil
import pdb
import re
import pickle
#my stuff!
from fitter import Gauss
from UsefulTools import names, FromPick_data_true, FromPick_data_spots, GetMiddle, GetSpots_bins_values, ShortFileString, num2str
import imagetools
import cattools
#super useful image packages
#import ds9
from scipy.stats import *
from scipy.ndimage import *
conn8=array([[1,1,1],[1,1,1],[1,1,1]])
conn4=array([[0,1,0],[1,1,1],[0,1,0]])
#BEFORE PLOTTING
tm_year,tm_mon,tm_mday,tm_hour,tm_min,tm_sec,tm_wday, tm_yday,tm_isdst=time.localtime()
DateString=str(tm_mon)+'/'+str(tm_mday)+'/'+str(tm_year)
FileString=ShortFileString(sys.argv[0])
from numpy import *
namespace=globals()
import scipy.signal as signal
import BartStar
import sextract
SEXDIR='/u/ki/awright/InstallingSoftware/pythons/sextractimtools/'
PLOTDIR='/u/ki/awright/data/eyes/CRNitschke_output/plot_SeeingClearly/'
if not 'DATACONF' in os.environ:
raise Exception("YOU MUST RUN THIS: . ~/wtgpipeline/progs.ini")
steps=arange(.4,2.0,.005) #chose this range and step size, could change later
def seeing_clearly_withplot(image,checkplots=1,saveas=None,**kwargs):
'''SAME AS seeing_clearly, BUT WITH PLOTTING!
Take input image, which could be one chip or a series of 10 chips and return the seeing value of that image.
(1) run the sextractor function to make a catalog with the parameters I'll need to identify stars
(2) Weight detections by how likely they are to be stars
(3) Step along the FWHM axis of the MAG, FWHM plane and find at which point the weighted sum of the neighboring detections is largest. This point is the seeing of the image.'''
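    # Illustrative summary of the figure of merit used below: each detection gets a flux
    # weight from wt_pts() (based on log10 FLUX_MAX) times a linear taper 1-|FWHM-s|/dist_cut;
    # for every trial seeing s in `steps` the FoM sums these weights over detections with
    # |FWHM - s| < dist_cut, and the s that maximises the FoM is reported as the seeing.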
try:
try:
header=pyfits.open(image)[0].header
except IOError:
header=pyfits.open(image[0])[0].header
try:
FILTER=header['FILTER']
except KeyError:
one_only=0
for filt in ["W-J-B","W-J-V","W-C-RC","W-C-IC","W-S-I+","W-S-Z+"]:
if filt in image:
if one_only==0:
one_only=1
else:
raise
filt_wins=filt
if one_only==1:
fo=pyfits.open(image,'update')
fo.verify('fix')
fo[0].header['FILTER']=filt_wins
fo.flush()
fo.close()
header=pyfits.open(image)[0].header
FILTER=header['FILTER']
else:
raise
#####DETERMINE CUT PARAMS#####
number_NN=200 #chose this number of nearest neighbors to consider, could change later #cut#
dist_cut=.02 #could change later #cut#
sat_cut=.95 #could change later #cut#
#if 'DATACONF' in os.environ and 'PIXSCALE' in os.environ and 'SATURATION' in os.environ:
# DATACONF=os.environ['DATACONF']
# PIXSCALE=float(os.environ['PIXSCALE'])
# SATURATION=os.environ['SATURATION']
#else:
# raise Exception("YOU MUST RUN THIS COMMAND FIRST: . /u/ki/awright/bonnpipeline/SUBARU.ini")
if 'PIXSCALE' in os.environ:
PIXSCALE=float(os.environ['PIXSCALE'])
else:
PIXSCALE=.202 #default to SUBARU 10_3 config
config=SEXDIR+'seeing_clearly.sex_coadd.config'
CATALOG_NAME=SEXDIR+'seeing_clearly-%s.cat' % (imagetools.id_generator(6),)
#set default sextractor parameters
if 'DETECT_MINAREA' not in kwargs: kwargs['DETECT_MINAREA']=2
if 'DETECT_THRESH' not in kwargs:
#if FILTER=="W-C-RC":
# kwargs['DETECT_THRESH']=10.0
# s2n_thresh=10.0
#elif FILTER=="W-J-B":
# kwargs['DETECT_THRESH']=7.0
# s2n_thresh=7.0
#adam-try# kwargs['DETECT_THRESH']=7.0
#adam-try# s2n_thresh=7.0
kwargs['DETECT_THRESH']=10.0
s2n_thresh=10.0
else:
s2n_thresh=kwargs['DETECT_THRESH']
if 'ANALYSIS_THRESH' not in kwargs:kwargs['ANALYSIS_THRESH']=kwargs['DETECT_THRESH']
FLAG_IMAGE=image.replace('.fits','.flag.fits')
if 'FLAG_IMAGE' not in kwargs and os.path.isfile(FLAG_IMAGE):
kwargs['FLAG_IMAGE']=FLAG_IMAGE
if 'FLAG_TYPE' not in kwargs:kwargs['FLAG_TYPE']= 'AND'
WEIGHT_IMAGE=image.replace('.fits','.weight.fits')
if 'WEIGHT_IMAGE' not in kwargs and os.path.isfile(WEIGHT_IMAGE):
kwargs['WEIGHT_IMAGE']=WEIGHT_IMAGE
if 'WEIGHT_TYPE' not in kwargs:kwargs['WEIGHT_TYPE']= 'NONE,MAP_WEIGHT'
#####RUN SEXTRACTOR#####
#2.2.2#sextractor(image,CATALOG_NAME=CATALOG_NAME,c=config,**kwargs)
if type(image)==string_ or type(image)==str:
res,RMS_back=sextract.sextractor(image,CATALOG_NAME=CATALOG_NAME,c=config,RMS_back=True,SEEING_FWHM=.7,**kwargs)#chose seeing=.7 since 2.2.2 requires a seeing value, could change later
back_rms,back=RMS_back
t=ascii.read(CATALOG_NAME,Reader=ascii.sextractor.SExtractor)
else:
all_images=image
all_back_rms=[]
all_ts=[]
for image_fl in all_images:
res,RMS_back_fl=sextract.sextractor(image_fl,CATALOG_NAME=CATALOG_NAME,c=config,RMS_back=True,SEEING_FWHM=.7,**kwargs)#chose seeing=.7 since 2.2.2 requires a seeing value, could change later
back_rms_fl,back_fl=RMS_back_fl
t_fl=ascii.read(CATALOG_NAME,Reader=ascii.sextractor.SExtractor)
all_back_rms.append(back_rms_fl)
all_ts.append(t_fl)
back_rms=mean(all_back_rms)
t=astropy.table.operations.vstack(all_ts)
#####CUT OUT SATURATED & ELONGATED THINGS#####
#sat_val=scoreatpercentile(t['FLUX_MAX'].data,87)
unsaturated=t['FLUX_MAX'].data<(t['FLUX_MAX'].data.max()*sat_cut) #chose sat_cut*100% of the max
#the ellipticity cut now depends on the number of stars that pass!
def rounder(min_frac_pass_el_cut=.35,el_cut_start=.1): #could change later #cut#
el_cut=el_cut_start
rounder_box=t['ELLIPTICITY'].data<el_cut #get objects that are sufficiently round
while rounder_box.mean()<min_frac_pass_el_cut: #while the ellipticity cut is removing too many objects
el_cut+=.01 #lower the standards on how elliptical the objects have to be in order to be considered as stars
try:
print "Hit ellipticity cut change for ",saveas.split('/')[-1]
except AttributeError:
pass
print "\tnow at %s" % (el_cut)
rounder_box=t['ELLIPTICITY'].data<el_cut
return rounder_box,el_cut
round_box,el_cut=rounder()
if round_box.sum()<200 and round_box.__len__()>300 and el_cut<.3:
print "Hit the ellipticity changing thing where I decide there needs to be more detections so I artificially change the el_cut...might want to change if I'm just adding in background by doing this!"
round_box,el_cut=rounder(min_frac_pass_el_cut=.5,el_cut_start=el_cut)
flagcut=(t['FLAGS']==0)*(t['IMAFLAGS_ISO']==0)
cut1spots=unsaturated*round_box*flagcut
t_better=t[cut1spots]
#####SET-UP WEIGHTING SCHEME SO THAT LARGER FLUX DETECTIONS CONTRIBUTE MORE TO THE FOM#####
#weight height in yy about these points!
yy=log10(t_better['FLUX_MAX'].data)
#wt_top_range,wt_bottom_range=scoreatpercentile(yy,95),scoreatpercentile(yy,5)
wt_top_range,wt_bottom_range=yy.max(),scoreatpercentile(yy,10)
wt_range= wt_top_range-wt_bottom_range
def wt_pts(x):
wt_val_at_bottom=.1 #could change later
wts=(1-wt_val_at_bottom)/wt_range*(x-wt_bottom_range)+wt_val_at_bottom
wts[wts<wt_val_at_bottom]=wt_val_at_bottom
wts[wts>1]=1
return wts
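        # e.g. with wt_bottom_range=2.0 and wt_top_range=4.0, log10(flux)=3.0 maps to a
        # weight of 0.55, 4.0 maps to 1.0, and anything below 2.0 is floored at 0.1
        # (illustrative numbers only)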
#####START PLOTTING#####
if checkplots:
f1=figure(figsize=(16,10))
ax1=f1.add_subplot(1,1,1)
f2=figure(figsize=(16,10))
ax2=f2.add_subplot(1,1,1)
#ax1.set_xlabel('different seeing guesses')
ax1.set_ylabel('Figure of Merit\nFOM=sum the of weights of objects with |FWHM-seeing|<%s' % (dist_cut),size=10)
ax1.set_title('"Seeing Clearly" calculation figure of merit vs. seeing')
#old#ax1.set_title('Plot Checking Seeing calculation\nsolid line=average distance bwtn NN and the seeing guess\ndashed line=1/width of NN flux distribution (normed to fit on these axes)')
cut2color=['b','g','r','c','m','y','orange','purple','k']
#####GET DATA AND LISTS NEEDED IN LOOP#####
s2n=t_better['FLUX_MAX'].data/back_rms
FWHM_IMAGE=t_better['FWHM_IMAGE'].data*PIXSCALE
plotspots=[]
seeings_wted=[]
Etype=0 #default is no error
xd,xu=.4,2.0 #xaxis limits
#####CALCULATE SEEING FOR DIFFERENT S2N CUT LEVELS UNTIL IT CONVERGES!#####
s2ncut_levels=arange(20,36,3)
for cut_i,s2ncut in enumerate(s2ncut_levels):
cut2spots=s2n>s2ncut
Nstars=cut2spots.sum()
if Nstars<number_NN:
try:
print "%s = Nstars<number_NN = %s" % (Nstars,number_NN)
if seeings_wted[-2]-seeing_wted<.03: #change back to .02
Etype=1
seeing_final_wted=seeing_wted
break
else:
Etype=2
print "cut2spots.sum()<number_NN without converging, returning nan!"
seeing_final_wted=nan
break
except IndexError:
##if this happens maybe I should take a 2nd look at the saturation and ellipticity cut?
raise Exception("if this happens maybe I should take a 2nd look at the saturation and ellipticity cut?")
#if cut_i==0:
# Etype=3
# print "cut2spots.sum()<number_NN on first loop, returning nan!"
# seeing_final_wted=nan
# break
#elif cut_i==1:
# Etype=4
# print "cut2spots.sum()<number_NN on 2nd loop, returning value from 1st loop!"
# seeing_final_wted=seeing
# break
fwhm=FWHM_IMAGE[cut2spots]
yy_pc2=yy[cut2spots]
foms_wted=[]
for s in steps:
diffs=fwhm-s
#this is the Apr 7th way of doing things: by picking out the closest 200 pts
#old#negatives=ma.masked_array(diffs,diffs>0)
#old#positives=ma.masked_array(diffs,diffs<0)
#old#diffargs=append(negatives.argsort()[-number_NN/2:],positives.argsort()[0:number_NN/2])
#go out a distance from the bin center and count the number of stars in there, multiplied by their weight
cut3spots=diffs.__abs__()<dist_cut #IR= In Region
IR_flux=yy_pc2[cut3spots]
IR_Y_wts=wt_pts(IR_flux)
IR_absdiffs=diffs.__abs__()[cut3spots]
IR_X_wts=1-IR_absdiffs/dist_cut
IR_wts=IR_X_wts*IR_Y_wts
IR_fom_wted=(IR_wts).sum() #minmax
foms_wted.append(IR_fom_wted)
foms_wted=asarray(foms_wted)
#get seeing_wted for this s2n iteration from fom_wted min
seebin_wted=foms_wted.argmax()#tmp #minmax
seeing_wted=steps[seebin_wted]
seeings_wted.append(seeing_wted)
#handle plotting
if checkplots:
#lets get the spots for plotting purposes
diffs=fwhm-seeing_wted
cut3spots=diffs.__abs__()<dist_cut #IR= In Region
finalspots=cattools.SpotCutter([cut1spots,cut2spots,cut3spots])
plotspots.append(finalspots)
#lets plot the stuff on the top axis
ax1.plot(steps,foms_wted,cut2color[cut_i],label='SNR>%s' % (s2ncut,))
ax1.legend();ax1.set_xlim(xd,xu)
#handle convergence and asigning final measured seeing value
if cut_i>0:
#fix: need to make convergence a little more rigorous
#if converged get final seeing from seeings_wted
if seeings_wted[-2]==seeing_wted:
seeing_final_wted=seeing_wted
#print "would have final seeing after %s loops: SEEING=%s" % (cut_i,seeing)
elif cut_i==5 and abs(seeings_wted[-2]-seeing_wted)<.02:
try:seeing_final_wted
except NameError:
seeing_final_wted=.5*(seeings_wted[-2]+seeing_wted)
else:
try:seeing_final_wted
except NameError:
seeing_final_wted=nan
Etype=6
###elif cut_i>=5: #runs at end of s2ncut loop!
#else: this only runs if you don't hit a break, but I want it to run every time!
try:
maybe_missed_stars=fwhm<(seeing_final_wted-dist_cut)
num_mms= maybe_missed_stars.sum()
frac_mms= maybe_missed_stars.mean()
localmaxs_wted=signal.argrelmax(foms_wted)[0] #minmax
vals_at_localmaxs=foms_wted[localmaxs_wted]
num_minima=len(localmaxs_wted)
#ismax=vals_at_localmaxs==foms_wted.max()
#aftermax=logical_not(ismax.cumsum())
#vals_aftermax=vals_at_localmaxs[aftermax]
#inds_aftermax=localmaxs_wted[aftermax]
ss=list(steps);final_ind=ss.index(seeing_final_wted)
b4max=localmaxs_wted<final_ind
vals_b4max=vals_at_localmaxs[b4max]
inds_b4max=localmaxs_wted[b4max]
#what if I tried to make this a little better! I'll choose the biggest peak, then see if there are others
fom1=vals_at_localmaxs.max()
peaks_high_enough=vals_b4max>fom1*.51
Ncandidates= peaks_high_enough.sum()
#here I pick out the LARGEST peak b4 the chosen peak.
ind_peak_b4max=vals_b4max.argmax()
seeing2=steps[inds_b4max[ind_peak_b4max]]
fom2=vals_b4max.max()
if Ncandidates>1:
#it picks out the FIRST peak that passes the cut and is also within 80% of the 2nd peak
fomcut=max([fom2*.8,fom1*.51])
peaks_high_enough=vals_b4max>fomcut
ind_peak_b4max= peaks_high_enough.argmax()
seeing2=steps[inds_b4max[ind_peak_b4max]]
fom2=vals_b4max[ind_peak_b4max]
if saveas.endswith(".png"):
saveas=saveas[:-4]+"-else-not2ndpeak"+".png"
else:
saveas+="-else-not2ndpeak"
#b4-was-working-fine#fom1=vals_at_localmaxs.max()
ratio=fom2/fom1
ind2=inds_b4max[vals_b4max.argmax()]
ind1=localmaxs_wted[vals_at_localmaxs.argmax()]
elseinfo=(num_mms,frac_mms,ratio,seeing2,seeing_wted)
seeoff=seeing_wted-seeing2
#if all of these conditions are true, then take the first peak instead of the highest peak!
if (seeoff<.28)*(frac_mms>.1)*(num_mms>39)*(ratio>.51): #maybe r=.4, .45, or .5 instead
seeing_final_wted=seeing2
print "maybe_missed_stars.sum() =",maybe_missed_stars.sum()
print "maybe_missed_stars.mean()=",maybe_missed_stars.mean()
if saveas.endswith(".png"):
saveas=saveas[:-4]+"-else-rpt51"+".png"
else:
saveas+="-else-rpt51"
except ValueError:
elseinfo=(nan,nan,nan,nan,nan)
#END s2n cut loop
namespace.update(locals())
os.system('rm '+CATALOG_NAME)
try:
if checkplots:
t['FWHM_IMAGE']*=PIXSCALE
ax2=BartStar.ShowStars(t,subsample_bools=tuple(plotspots),axes=('FWHM_IMAGE','FLUX_MAX'),ax=ax2,subsample_name='ellipticity<%s & flux<%s*max(flux) & w/in %s of bin center' % (el_cut,sat_cut,dist_cut),first_diff=0)
ax2.set_xlim(xd,xu)
yu2,yd2=ax2.get_ylim()
ax2.plot([seeing_final_wted,seeing_final_wted],[yu2,yd2],'k--',label='SC wted')
try:
head_see=pyfits.open(image)[0].header['SEEING']
ax2.plot([head_see,head_see],[yu2,yd2],'purple',alpha=.5,label='Header See')
except:
print "HEADER SEEING WASN'T THERE!"
pass
ax2.plot([xd,xu],[wt_bottom_range,wt_bottom_range],'r',ls=":",label='range')
ax2.plot([xd,xu],[wt_top_range,wt_top_range],'r',ls=":")
yu1,yd1=ax1.get_ylim()
ax1.plot([seeing_final_wted,seeing_final_wted],[yu1,yd1],'k--')
ax2.text(xd+.03*(xu-xd),yd2+.1*(yu2-yd2),'seeings_wted='+(len(seeings_wted)*'%.2f,' % tuple(seeings_wted))[:-1])
ax2.legend()
if saveas:
print "############### OUTPUT PLOT HERE: ###################"
print "### ", saveas
print "### ", saveas.replace('.png','_FoM.png')
print "############### CHECK IT OUT! ###################\n"
if '/' not in saveas: saveas=PLOTDIR+saveas
f2.savefig(saveas)
f1.savefig(saveas.replace('.png','_FoM.png'))
if Etype>0:
errfl=open('/u/ki/awright/InstallingSoftware/pythons/sextractimtools/seeing_clearly_errors.log','a+')
#errfl=open('/u/ki/awright/InstallingSoftware/pythons/sextractimtools/seeing_clearly_errors-ellcutpt1.log','a+')
errfl.write('\n#################################################################\n')
for varname,varval in zip(['image','Etype','cut_i','seeings_wted','el_cut','Nstars','round.mean()','unsaturated.mean()','round.sum()','unsaturated.sum()'],[image,Etype,cut_i,seeings_wted,el_cut,Nstars,round_box.mean(),unsaturated.mean(),round_box.sum(),unsaturated.sum()]):
errfl.write('%17s = ' % (varname,) + str(varval)+ '\n')
errfl.write('#################################################################\n')
errfl.flush()
errfl.close()
namespace.update(locals())
try:
return seeing_final_wted, array(all_back_rms)
except NameError:
return seeing_final_wted,back_rms
#return seeing_final_wted,seeings_wted,elseinfo
except NameError:
#changed things so that convergence should be easier, so if we hit this error it's bad
print "changed things so that convergence should be easier, so if we hit this error it's bad"
namespace.update(locals())
raise
print "Finished without converging, returning nan!"
Etype=7
errfl=open('/u/ki/awright/InstallingSoftware/pythons/sextractimtools/seeing_clearly_errors.log','a+')
#errfl=open('/u/ki/awright/InstallingSoftware/pythons/sextractimtools/seeing_clearly_errors-ellcutpt1.log','a+')
errfl.write('\n#################################################################\n')
for varname,varval in zip(['image','Etype','cut_i','seeings_wted','el_cut','Nstars','round.mean()','unsaturated.mean()','round.sum()','unsaturated.sum()'],[image,Etype,cut_i,seeings_wted,el_cut,Nstars,round_box.mean(),unsaturated.mean(),round_box.sum(),unsaturated.sum()]):
errfl.write('%17s = ' % (varname,) + str(varval)+ '\n')
errfl.write('#################################################################\n')
errfl.flush()
#tmp: I'm not doing anything here because this hasn't happened yet. I'll cross that bridge later if I come to it
try:
return nan,all_back_rms
except NameError:
return nan,back_rms
except:
namespace.update(locals())
raise
if __name__ == "__main__":
from adam_quicktools_ArgCleaner import ArgCleaner
args=ArgCleaner(sys.argv)
#for false_arg in ['-i', '--']:
# if false_arg in args: args.remove(false_arg)
if len(args)<1:
sys.exit()
if not os.path.isfile(args[0]):
print "args[0]=",args[0]
raise Exception(args[0]+" is not a file!")
else:
img_name=args[0]
saveas=img_name.replace('.fits','_SeeingClearly.png')
print "Using SeeingClearly to get seeing for: "+img_name
seeing,back_rms=seeing_clearly_withplot(img_name,checkplots=1,saveas=saveas)
print "RMS of the background in this image is: "+str(back_rms)+"\n\n SeeingClearly.seeing_clearly got (in arcseconds): Seeing="+str(seeing)
#show()
fo=pyfits.open(img_name,'update')
fo.verify('fix')
fo[0].header['SEEING']=round(seeing,3)
print "SEEING in header: ",round(seeing,3)
fo.flush()
fo.close()
| mit |
hep-cce/ml_classification_studies | cosmoDNN/Regression/train.py | 2 | 6820 | '''
Author: Nesar Ramachandra
Implementation of regression using ConvNet.
Inverse problem finds 5 parameters corresponding to each image
Make changes to include number of parameters to be backtracked, and which ones
'''
import numpy as np
import load_train_data
from model_architectures import basic_model
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, RMSprop, Adadelta, Adam
from keras import backend as K
K.set_image_dim_ordering('tf')
import time
#import tensorflow as tf
#sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
from keras.preprocessing.image import ImageDataGenerator
data_augmentation = True
time_i = time.time()
Dir0 = '../../../../'
Dir1 = Dir0+'AllTrainTestSets/JPG/'
Dir2 = ['single/', 'stack/'][1]
Dir3 = ['0/', '1/'][1]
data_path = Dir1 + Dir2 + Dir3 + 'TrainingData/'
names = ['lensed', 'unlensed']
data_dir_list = ['lensed_outputs', 'unlensed_outputs']
num_epoch = 300
batch_size = 256
learning_rate = 0.001 # Warning: lr and decay vary across optimizers
decay_rate = 0.1
opti_id = 1 # [SGD, Adam, Adadelta, RMSprop]
loss_id = 0 # [mse, mae] # mse is always better
image_size = img_rows = 45
img_cols = 45
num_channel = 1
num_classes = 2
num_files = 8000*num_classes
train_split = 0.8 # 80 percent
num_train = int(train_split*num_files)
num_para = 5
'''
def load_train():
img_data_list = []
# labels = []
# for name in names:
for labelID in [0, 1]:
name = names[labelID]
for img_ind in range( int(num_files / num_classes) ):
input_img = np.load(data_path + '/' + name + '_outputs/' + name + str(img_ind) + '.npy')
if np.isnan(input_img).any():
print(labelID, img_ind, ' -- ERROR: NaN')
else:
img_data_list.append(input_img)
# labels.append([labelID, 0.5*labelID, 0.33*labelID, 0.7*labelID, 5.0*labelID] )
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
# labels = np.array(labels)
# labels = labels.astype('float32')
img_data /= 255
print (img_data.shape)
if num_channel == 1:
if K.image_dim_ordering() == 'th':
img_data = np.expand_dims(img_data, axis=1)
print (img_data.shape)
else:
img_data = np.expand_dims(img_data, axis=4)
print (img_data.shape)
else:
if K.image_dim_ordering() == 'th':
img_data = np.rollaxis(img_data, 3, 1)
print (img_data.shape)
X_train = img_data
# y_train = np_utils.to_categorical(labels, num_classes)
labels = np.load(Dir1 + Dir2 + Dir3 + 'Train5para.npy')
# print labels1.shape
print(labels.shape)
para5 = labels[:,2:]
np.random.seed(12345)
shuffleOrder = np.arange(X_train.shape[0])
np.random.shuffle(shuffleOrder)
X_train = X_train[shuffleOrder]
y_train = para5[shuffleOrder]
# print y_train[0:10]
# print y_train[0:10]
return X_train, y_train
def read_and_normalize_train_data():
train_data, train_target = load_train()
train_data = np.array(train_data, dtype=np.float32)
train_target = np.array(train_target, dtype=np.float32)
m = train_data.mean()
s = train_data.std()
print('Train mean, sd:', m, s )
train_data -= m
train_data /= s
print('Train shape:', train_data.shape)
print(train_data.shape[0], 'train samples')
return train_data, train_target
train_data, train_target = read_and_normalize_train_data()
X_train = train_data[0:num_train,:,:,:]
y_train = train_target[0:num_train]
X_test = train_data[num_train:num_files,:,:,:]
y_test = train_target[num_train:num_files]
'''
##-------------------------------------------------------------------------------------
## Load data
lens = load_train_data.LensData(data_path = Dir1 + Dir2 + Dir3 + 'TrainingData/')
(X_train, y_train), (X_test, y_test) = lens.load_data()[:100]
##-------------------------------------------------------------------------------------
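# Hedged sketch (not the repo's model_architectures.basic_model, whose definition is not
# shown here): a minimal CNN with num_para linear outputs, built only from the layers and
# optimizer imported above, to illustrate the kind of regression network being trained.
def _sketch_regression_cnn(input_shape=(img_rows, img_cols, num_channel), n_out=num_para):
    net = Sequential()
    net.add(Convolution2D(32, (3, 3), activation='relu', input_shape=input_shape))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Flatten())
    net.add(Dense(128, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(n_out))  # linear activation for regression
    net.compile(loss='mse', optimizer=Adam(lr=learning_rate, decay=decay_rate))
    return net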
model = basic_model()  # assumes basic_model (imported above) builds the network; the original called an undefined create_model()
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=180, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=True) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(X_train)
# Fit the model on the batches generated by datagen.flow().
ModelFit = model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
steps_per_epoch=X_train.shape[0] // batch_size,
epochs=num_epoch, verbose = 2,
validation_data= (X_test, y_test ) )
plotLossAcc = False
if plotLossAcc:
import matplotlib.pylab as plt
train_loss= ModelFit.history['loss']
val_loss= ModelFit.history['val_loss']
# train_acc= ModelFit.history['acc']
# val_acc= ModelFit.history['val_acc']
epochs= range(1, num_epoch+1)
fig, ax = plt.subplots(1,1, sharex= True, figsize = (7,5))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace= 0.02)
ax.plot(epochs,train_loss)
ax.plot(epochs,val_loss)
ax.set_ylabel('loss')
# ax[0].set_ylim([0,1])
# ax[0].set_title('Loss')
ax.legend(['train_loss','val_loss'])
# accuracy doesn't make sense for regression
plt.show()
SaveModel = True
if SaveModel:
epochs = np.arange(1, num_epoch+1)
train_loss = ModelFit.history['loss']
val_loss = ModelFit.history['val_loss']
training_hist = np.vstack([epochs, train_loss, val_loss])
fileOut = 'DeeperRegressionStack_opti' + str(opti_id) + '_loss' + str(loss_id) + '_lr' + str(learning_rate) + '_decay' + str(decay_rate) + '_batch' + str(batch_size) + '_epoch' + str(num_epoch)
print(fileOut)
model.save('../../ModelOutRegression/' + fileOut + '.hdf5')
np.save('../../ModelOutRegression/'+fileOut+'.npy', training_hist)
time_j = time.time()
print(time_j - time_i, 'seconds')
| gpl-3.0 |
catmiao/trading-with-python | lib/interactiveBrokers/histData.py | 76 | 6472 | '''
Created on May 8, 2013
Copyright: Jev Kuznetsov
License: BSD
Module for downloading historic data from IB
'''
import ib
import pandas as pd
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
import logger as logger
from pandas import DataFrame, Index
import os
import datetime as dt
import time
from time import sleep
from extra import timeFormat, dateFormat
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pd.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1 D',barSizeSetting='30 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
if isinstance(endDateTime,dt.datetime): # convert to string
endDateTime = endDateTime.strftime(timeFormat)
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
# def getIntradayData(self,contract, dateTuple ):
# ''' get full day data on 1-s interval
# date: a tuple of (yyyy,mm,dd)
# '''
#
# openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
# closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
#
# timeRange = pd.date_range(openTime,closeTime,freq='30min')
#
# datasets = []
#
# for t in timeRange:
# datasets.append(self.requestData(contract,t.strftime(timeFormat)))
#
# return pd.concat(datasets)
def disconnect(self):
self.tws.disconnect()
class _HistDataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
self._log.debug('Data recieved')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date,dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class TimeKeeper(object):
'''
    class for keeping track of previous requests, to satisfy the IB requirements
    (max 60 requests / 10 min)
    each time a request is made, a timestamp is added to a txt file in the user dir.
'''
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~')+'/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir,'requests.txt'))
# Create file if it's missing
if not os.path.exists(self.dataFile):
open(self.dataFile,'w').close()
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
''' adds a timestamp of current request'''
with open(self.dataFile,'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat)+'\n')
def nrRequests(self,timeSpan=600):
''' return number of requests in past timespan (s) '''
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile,'r') as f:
lines = f.readlines()
for line in lines:
if now-dt.datetime.strptime(line.strip(),self._timeFormat) < delta:
requests+=1
if requests==0: # erase all contents if no requests are relevant
open(self.dataFile,'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
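# Hedged usage sketch (not part of the original module): fetching bars for several symbols;
# Downloader.requestData already applies the TimeKeeper pacing (max 60 historic requests
# per 600 s), so the loop can simply iterate over the symbols.
def _sketch_download_many(symbols, endDateTime):
    from extra import createContract # same helper used in __main__ below
    dl = Downloader()
    frames = {}
    for sym in symbols:
        frames[sym] = dl.requestData(createContract(sym), endDateTime)
    dl.disconnect()
    return frames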
if __name__ == '__main__':
from extra import createContract
dl = Downloader(debug=True) # historic data downloader class
contract = createContract('SPY') # create contract using defaults (STK,SMART,USD)
data = dl.requestData(contract,"20141208 16:00:00 EST") # request 30-second data bars up till now
data.to_csv('SPY.csv') # write data to csv
print 'Done' | bsd-3-clause |
has2k1/plydata | plydata/dataframe/two_table.py | 1 | 1485 | """
Two table verb implementations for a :class:`pandas.DataFrame`
"""
import pandas as pd
from ..types import GroupedDataFrame
from ..operators import register_implementations
__all__ = [
'inner_join', 'outer_join', 'left_join',
'right_join', 'anti_join', 'semi_join'
]
def inner_join(verb):
verb.kwargs['how'] = 'inner'
return _join(verb)
def outer_join(verb):
verb.kwargs['how'] = 'outer'
return _join(verb)
def left_join(verb):
verb.kwargs['how'] = 'left'
return _join(verb)
def right_join(verb):
verb.kwargs['how'] = 'right'
return _join(verb)
def anti_join(verb):
verb.kwargs['how'] = 'left'
verb.kwargs['suffixes'] = ('', '_y')
verb.kwargs['indicator'] = '_plydata_merge'
df = _join(verb)
data = df.query('_plydata_merge=="left_only"')[verb.x.columns]
data._is_copy = None
return data
def semi_join(verb):
verb.kwargs['how'] = 'left'
verb.kwargs['suffixes'] = ('', '_y')
verb.kwargs['indicator'] = '_plydata_merge'
df = _join(verb)
data = df.query('_plydata_merge=="both"')[verb.x.columns]
data._is_copy = None
data.drop_duplicates(inplace=True)
return data
def _join(verb):
"""
Join helper
"""
data = pd.merge(verb.x, verb.y, **verb.kwargs)
# Preserve x groups
if isinstance(verb.x, GroupedDataFrame):
data.plydata_groups = list(verb.x.plydata_groups)
return data
register_implementations(globals(), __all__, 'dataframe')
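# Hedged sketch (not part of the package API): the merge-indicator trick used by anti_join
# and semi_join above, written directly against pandas for illustration.
def _sketch_semi_anti(x, y, on):
    merged = pd.merge(x, y, how='left', on=on, suffixes=('', '_y'),
                      indicator='_plydata_merge')
    semi = merged.query('_plydata_merge=="both"')[x.columns].drop_duplicates()
    anti = merged.query('_plydata_merge=="left_only"')[x.columns]
    return semi, anti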
| bsd-3-clause |
jesusbriales/rgbd_benchmark_tools | src/rgbd_benchmark_tools/evaluate_rpe.py | 1 | 18184 | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This script computes the relative pose error from the ground truth trajectory
and the estimated trajectory.
"""
import argparse
import random
import numpy as np
import sys
import h5py
_EPS = np.finfo(float).eps * 4.0
def transform44(l):
"""
Generate a 4x4 homogeneous transformation matrix from a 3D point and unit quaternion.
Input:
l -- tuple consisting of (stamp,tx,ty,tz,qx,qy,qz,qw) where
(tx,ty,tz) is the 3D position and (qx,qy,qz,qw) is the unit quaternion.
Output:
matrix -- 4x4 homogeneous transformation matrix
"""
t = l[1:4]
q = np.array(l[4:8], dtype=np.float64, copy=True)
nq = np.dot(q, q)
if nq < _EPS:
        return np.array((
            (1.0, 0.0, 0.0, t[0]),
            (0.0, 1.0, 0.0, t[1]),
            (0.0, 0.0, 1.0, t[2]),
            (0.0, 0.0, 0.0, 1.0)
            ), dtype=np.float64)
q *= np.sqrt(2.0 / nq)
q = np.outer(q, q)
return np.array((
(1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], t[0]),
( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], t[1]),
( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], t[2]),
( 0.0, 0.0, 0.0, 1.0)
), dtype=np.float64)
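# Hedged sanity check (not part of the original script): the identity quaternion
# (qx,qy,qz,qw)=(0,0,0,1) must give a pure translation.
def _demo_transform44_identity():
    T = transform44([0.0, 1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 1.0])
    assert np.allclose(T[0:3, 0:3], np.eye(3))
    assert np.allclose(T[0:3, 3], [1.0, 2.0, 3.0])
    return T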
def read_trajectory(filename, matrix=True):
"""
Read a trajectory from a text file.
Input:
filename -- file to be read
matrix -- convert poses to 4x4 matrices
Output:
dictionary of stamped 3D poses
"""
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
list = [[float(v.strip()) for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
list_ok = []
for i,l in enumerate(list):
if l[4:8]==[0,0,0,0]:
continue
isnan = False
for v in l:
if np.isnan(v):
isnan = True
break
if isnan:
sys.stderr.write("Warning: line %d of file '%s' has NaNs, skipping line\n"%(i,filename))
continue
list_ok.append(l)
if matrix :
traj = dict([(l[0],transform44(l[0:])) for l in list_ok])
else:
traj = dict([(l[0],l[1:8]) for l in list_ok])
return traj
def read_trajectory_h5(dset, matrix=True):
"""
    Read a trajectory from an HDF5 dataset.
Input:
dset -- a HDF5 dataset of dimensions 8xN with trajectory poses in TUM format
matrix -- convert poses to 4x4 matrices
Output:
dictionary of stamped 3D poses
"""
list_ok = []
N = dset.shape[1];
for j in range(0,N):
l = dset[:,j].tolist();
        if l[4:8]==[0,0,0,0]: # skip poses with an all-zero (invalid) quaternion, as in read_trajectory
continue
isnan = False
for v in l:
if np.isnan(v):
isnan = True
break
if isnan:
sys.stderr.write("Warning: line %d of file '%s' has NaNs, skipping line\n"%(i,filename))
continue
list_ok.append(l)
if matrix :
traj = dict([(l[0],transform44(l[0:])) for l in list_ok])
else:
traj = dict([(l[0],l[1:8]) for l in list_ok])
return traj
def find_closest_index(L,t):
"""
Find the index of the closest value in a list.
Input:
L -- the list
t -- value to be found
Output:
index of the closest element
"""
beginning = 0
difference = abs(L[0] - t)
best = 0
end = len(L)
while beginning < end:
middle = int((end+beginning)/2)
if abs(L[middle] - t) < difference:
difference = abs(L[middle] - t)
best = middle
if t == L[middle]:
return middle
elif L[middle] > t:
end = middle
else:
beginning = middle + 1
return best
def ominus(a,b):
"""
Compute the relative 3D transformation between a and b.
Input:
a -- first pose (homogeneous 4x4 matrix)
b -- second pose (homogeneous 4x4 matrix)
Output:
Relative 3D transformation from a to b.
"""
return np.dot(np.linalg.inv(a),b)
def scale(a,scalar):
"""
Scale the translational components of a 4x4 homogeneous matrix by a scale factor.
"""
return np.array(
[[a[0,0], a[0,1], a[0,2], a[0,3]*scalar],
[a[1,0], a[1,1], a[1,2], a[1,3]*scalar],
[a[2,0], a[2,1], a[2,2], a[2,3]*scalar],
[a[3,0], a[3,1], a[3,2], a[3,3]]]
)
def compute_distance(transform):
"""
Compute the distance of the translational component of a 4x4 homogeneous matrix.
"""
return np.linalg.norm(transform[0:3,3])
def compute_angle(transform):
"""
Compute the rotation angle from a 4x4 homogeneous matrix.
"""
# an invitation to 3-d vision, p 27
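    # i.e. angle = arccos((trace(R) - 1) / 2), with the argument clipped to [-1, 1]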
return np.arccos( min(1,max(-1, (np.trace(transform[0:3,0:3]) - 1)/2) ))
def distances_along_trajectory(traj):
"""
Compute the translational distances along a trajectory.
"""
keys = traj.keys()
keys.sort()
motion = [ominus(traj[keys[i+1]],traj[keys[i]]) for i in range(len(keys)-1)]
distances = [0]
sum = 0
for t in motion:
sum += compute_distance(t)
distances.append(sum)
return distances
def rotations_along_trajectory(traj,scale):
"""
Compute the angular rotations along a trajectory.
"""
keys = traj.keys()
keys.sort()
motion = [ominus(traj[keys[i+1]],traj[keys[i]]) for i in range(len(keys)-1)]
distances = [0]
sum = 0
for t in motion:
sum += compute_angle(t)*scale
distances.append(sum)
return distances
def evaluate_trajectory(traj_gt,traj_est,param_max_pairs=10000,param_fixed_delta=False,param_delta=1.00,param_delta_unit="s",param_offset=0.00,param_scale=1.00):
"""
Compute the relative pose error between two trajectories.
Input:
traj_gt -- the first trajectory (ground truth)
traj_est -- the second trajectory (estimated trajectory)
param_max_pairs -- number of relative poses to be evaluated
param_fixed_delta -- false: evaluate over all possible pairs
true: only evaluate over pairs with a given distance (delta)
param_delta -- distance between the evaluated pairs
param_delta_unit -- unit for comparison:
"s": seconds
"m": meters
"rad": radians
"deg": degrees
"f": frames
param_offset -- time offset between two trajectories (to model the delay)
param_scale -- scale to be applied to the second trajectory
Output:
list of compared poses and the resulting translation and rotation error
"""
stamps_gt = list(traj_gt.keys())
stamps_est = list(traj_est.keys())
stamps_gt.sort()
stamps_est.sort()
stamps_est_return = []
for t_est in stamps_est:
t_gt = stamps_gt[find_closest_index(stamps_gt,t_est + param_offset)]
t_est_return = stamps_est[find_closest_index(stamps_est,t_gt - param_offset)]
t_gt_return = stamps_gt[find_closest_index(stamps_gt,t_est_return + param_offset)]
if not t_est_return in stamps_est_return:
stamps_est_return.append(t_est_return)
if(len(stamps_est_return)<2):
raise Exception("Number of overlap in the timestamps is too small. Did you run the evaluation on the right files?")
if param_delta_unit=="s":
index_est = list(traj_est.keys())
index_est.sort()
elif param_delta_unit=="m":
index_est = distances_along_trajectory(traj_est)
elif param_delta_unit=="rad":
index_est = rotations_along_trajectory(traj_est,1)
elif param_delta_unit=="deg":
index_est = rotations_along_trajectory(traj_est,180/np.pi)
elif param_delta_unit=="f":
index_est = range(len(traj_est))
else:
raise Exception("Unknown unit for delta: '%s'"%param_delta_unit)
if not param_fixed_delta:
if(param_max_pairs==0 or len(traj_est)<np.sqrt(param_max_pairs)):
pairs = [(i,j) for i in range(len(traj_est)) for j in range(len(traj_est))]
else:
pairs = [(random.randint(0,len(traj_est)-1),random.randint(0,len(traj_est)-1)) for i in range(param_max_pairs)]
else:
pairs = []
for i in range(len(traj_est)):
j = find_closest_index(index_est,index_est[i] + param_delta)
if j!=len(traj_est)-1:
pairs.append((i,j))
if(param_max_pairs!=0 and len(pairs)>param_max_pairs):
pairs = random.sample(pairs,param_max_pairs)
gt_interval = np.median([s-t for s,t in zip(stamps_gt[1:],stamps_gt[:-1])])
gt_max_time_difference = 2*gt_interval
result = []
for i,j in pairs:
stamp_est_0 = stamps_est[i]
stamp_est_1 = stamps_est[j]
stamp_gt_0 = stamps_gt[ find_closest_index(stamps_gt,stamp_est_0 + param_offset) ]
stamp_gt_1 = stamps_gt[ find_closest_index(stamps_gt,stamp_est_1 + param_offset) ]
if(abs(stamp_gt_0 - (stamp_est_0 + param_offset)) > gt_max_time_difference or
abs(stamp_gt_1 - (stamp_est_1 + param_offset)) > gt_max_time_difference):
continue
error44 = ominus( scale(
ominus( traj_est[stamp_est_1], traj_est[stamp_est_0] ),param_scale),
ominus( traj_gt[stamp_gt_1], traj_gt[stamp_gt_0] ) )
trans = compute_distance(error44)
rot = compute_angle(error44)
result.append([stamp_est_0,stamp_est_1,stamp_gt_0,stamp_gt_1,trans,rot])
if len(result)<2:
raise Exception("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory!")
return result
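# Hedged sketch (not part of the original tool): evaluating a tiny synthetic pair of
# trajectories; the timestamps and poses below are made up purely for illustration.
def _demo_evaluate_trajectory():
    stamps = [0.0, 1.0, 2.0, 3.0]
    traj_gt = dict((t, transform44([t, t, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])) for t in stamps)
    traj_est = dict((t, transform44([t, 1.05*t, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])) for t in stamps)
    result = evaluate_trajectory(traj_gt, traj_est, param_fixed_delta=True, param_delta=1.0)
    return np.array(result)[:, 4] # translational error of each compared pair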
def percentile(seq,q):
"""
Return the q-percentile of a list
"""
seq_sorted = list(seq)
seq_sorted.sort()
return seq_sorted[int((len(seq_sorted)-1)*q)]
if __name__ == '__main__':
random.seed(0)
parser = argparse.ArgumentParser(description='''
This script computes the relative pose error from the ground truth trajectory and the estimated trajectory.
''')
parser.add_argument('groundtruth_file', help='ground-truth trajectory file (format: "timestamp tx ty tz qx qy qz qw")')
parser.add_argument('estimated_file', help='estimated trajectory file (format: "timestamp tx ty tz qx qy qz qw")')
parser.add_argument('--h5file', help='HDF5 file in which the trajectories are stored as datasets (format: 8xN dataset, with "timestamp tx ty tz qx qy qz qw" as rows)',default="")
parser.add_argument('--max_pairs', help='maximum number of pose comparisons (default: 10000, set to zero to disable downsampling)', default=10000)
parser.add_argument('--fixed_delta', help='only consider pose pairs that have a distance of delta delta_unit (e.g., for evaluating the drift per second/meter/radian)', action='store_true')
parser.add_argument('--delta', help='delta for evaluation (default: 1.0)',default=1.0)
parser.add_argument('--delta_unit', help='unit of delta (options: \'s\' for seconds, \'m\' for meters, \'rad\' for radians, \'f\' for frames; default: \'s\')',default='s')
parser.add_argument('--offset', help='time offset between ground-truth and estimated trajectory (default: 0.0)',default=0.0)
parser.add_argument('--scale', help='scaling factor for the estimated trajectory (default: 1.0)',default=1.0)
parser.add_argument('--save', help='text file to which the evaluation will be saved (format: stamp_est0 stamp_est1 stamp_gt0 stamp_gt1 trans_error rot_error)')
parser.add_argument('--plot', help='plot the result to a file (requires --fixed_delta, output format: png)')
parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the mean translational error measured in meters will be printed)', action='store_true')
args = parser.parse_args()
if args.plot and not args.fixed_delta:
sys.exit("The '--plot' option can only be used in combination with '--fixed_delta'")
if args.h5file == "":
traj_gt = read_trajectory(args.groundtruth_file)
traj_est = read_trajectory(args.estimated_file)
else:
h5f = h5py.File(args.h5file,'a')
dset_gt = h5f[args.groundtruth_file]
dset_est = h5f[args.estimated_file]
traj_gt = read_trajectory_h5(dset_gt)
traj_est = read_trajectory_h5(dset_est)
result = evaluate_trajectory(traj_gt,
traj_est,
int(args.max_pairs),
args.fixed_delta,
float(args.delta),
args.delta_unit,
float(args.offset),
float(args.scale))
stamps = np.array(result)[:,0]
trans_error = np.array(result)[:,4]
rot_error = np.array(result)[:,5]
if args.h5file != "":
# Create subgroup in the parent group of the estimation dataset
sample_group = dset_est.parent
eval_group = sample_group.require_group('eval/'+args.delta_unit)
# Save the evaluation vectors as datasets in the eval group
eval_group.create_dataset('stamps', data=stamps)
eval_group.create_dataset('allStamps', data=np.array(result)[:,0:4])
eval_group.create_dataset('trans_error', data=trans_error)
eval_group.create_dataset('rot_error', data=rot_error)
# Save the evaluation metrics as datasets of a single value in the eval group
t_rmse = np.sqrt(np.dot(trans_error,trans_error) / len(trans_error))
t_mean = np.mean(trans_error)
t_median = np.median(trans_error)
t_max = np.max(trans_error)
eval_group.create_dataset('t_rmse', data=t_rmse )
eval_group.create_dataset('t_mean', data=t_mean )
eval_group.create_dataset('t_median', data=t_median )
eval_group.create_dataset('t_max', data=t_max )
r_rmse = np.sqrt(np.dot(rot_error,rot_error) / len(rot_error)) * 180.0 / np.pi
r_mean = np.mean(rot_error) * 180.0 / np.pi
r_median = np.median(rot_error) * 180.0 / np.pi
r_max = np.max(rot_error) * 180.0 / np.pi
eval_group.create_dataset('r_rmse', data=r_rmse )
eval_group.create_dataset('r_mean', data=r_mean )
eval_group.create_dataset('r_median', data=r_median )
eval_group.create_dataset('r_max', data=r_max )
if args.save:
f = open(args.save,"w")
f.write("\n".join([" ".join(["%f"%v for v in line]) for line in result]))
f.close()
if args.verbose:
print "compared_pose_pairs %d pairs"%(len(trans_error))
print "translational_error.rmse %f m"%np.sqrt(np.dot(trans_error,trans_error) / len(trans_error))
print "translational_error.mean %f m"%np.mean(trans_error)
print "translational_error.median %f m"%np.median(trans_error)
print "translational_error.std %f m"%np.std(trans_error)
print "translational_error.min %f m"%np.min(trans_error)
print "translational_error.max %f m"%np.max(trans_error)
print "rotational_error.rmse %f deg"%(np.sqrt(np.dot(rot_error,rot_error) / len(rot_error)) * 180.0 / np.pi)
print "rotational_error.mean %f deg"%(np.mean(rot_error) * 180.0 / np.pi)
print "rotational_error.median %f deg"%(np.median(rot_error) * 180.0 / np.pi)
print "rotational_error.std %f deg"%(np.std(rot_error) * 180.0 / np.pi)
print "rotational_error.min %f deg"%(np.min(rot_error) * 180.0 / np.pi)
print "rotational_error.max %f deg"%(np.max(rot_error) * 180.0 / np.pi)
else:
print np.mean(trans_error)
if args.plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(stamps - stamps[0],trans_error,'-',color="blue")
#ax.plot([t for t,e in err_rot],[e for t,e in err_rot],'-',color="red")
ax.set_xlabel('time [s]')
ax.set_ylabel('translational error [m]')
plt.savefig(args.plot,dpi=300)
| bsd-2-clause |
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_fdr_stats_evoked.py | 24 | 2752 | """
=======================================
FDR correction on T-test on sensor data
=======================================
One tests if the evoked response significantly deviates from 0.
The multiple comparisons problem is addressed with
False Discovery Rate (FDR) correction.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.stats import bonferroni_correction, fdr_correction
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)[:30]
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
X = epochs.get_data() # as 3D matrix
X = X[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
T, pval = stats.ttest_1samp(X, 0)
alpha = 0.05
n_samples, n_tests = X.shape
threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
threshold_fdr = np.min(np.abs(T)[reject_fdr])
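# Hedged sketch (not part of the original example): the Benjamini-Hochberg step-up rule
# that fdr_correction(method='indep') implements, spelled out with plain numpy.
def _manual_fdr(pvals, alpha=0.05):
    pvals = np.asarray(pvals)
    order = np.argsort(pvals)
    m = len(pvals)
    below = pvals[order] <= alpha * np.arange(1, m + 1) / float(m)
    reject = np.zeros(m, dtype=bool)
    if below.any():
        reject[order[:below.nonzero()[0].max() + 1]] = True
    return reject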
###############################################################################
# Plot
times = 1e3 * epochs.times
plt.close('all')
plt.plot(times, T, 'k', label='T-stat')
xmin, xmax = plt.xlim()
plt.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k',
label='p=0.05 (uncorrected)', linewidth=2)
plt.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r',
label='p=0.05 (Bonferroni)', linewidth=2)
plt.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b',
label='p=0.05 (FDR)', linewidth=2)
plt.legend()
plt.xlabel("Time (ms)")
plt.ylabel("T-stat")
plt.show()
| bsd-3-clause |
pompiduskus/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small number
of samples forces us to shrink a lot. As a result, the Ledoit-Wolf precision
is fairly close to the ground truth precision, which is not far from being
diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model, is
chosen by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
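# A hedged aside (not in the upstream example): a quick numerical comparison
# of each precision estimate against the ground-truth precision matrix,
# using the Frobenius norm of the difference. Names prefixed with "_" are
# introduced only for this sketch.
for _name, _p in [('Empirical', linalg.inv(emp_cov)),
                  ('Ledoit-Wolf', lw_prec_),
                  ('GraphLassoCV', prec_)]:
    print('%-12s precision error (Frobenius): %.3f'
          % (_name, np.linalg.norm(_p - prec, 'fro')))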
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
ChinaQuants/zipline | tests/test_rolling_panel.py | 12 | 7118 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import deque
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from zipline.utils.data import MutableIndexRollingPanel, RollingPanel
from zipline.finance.trading import TradingEnvironment
class TestRollingPanel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
@classmethod
def tearDownClass(cls):
del cls.env
def test_alignment(self):
items = ('a', 'b')
sids = (1, 2)
dts = self.env.market_minute_window(
self.env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts[2:],
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
rp.extend_back(dts[:-2])
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts,
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
def test_get_current_multiple_call_same_tick(self):
"""
In the old get_current, each call to get_current would copy the data, so
changing the returned object had no side effects.
To keep the same API, make sure that the raw option returns a copy too.
"""
def data_id(values):
return values.__array_interface__['data']
items = ('a', 'b')
sids = (1, 2)
dts = self.env.market_minute_window(
self.env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
# each get_current call makes a copy
cur = rp.get_current()
cur2 = rp.get_current()
assert data_id(cur.values) != data_id(cur2.values)
# make sure raw follow same logic
raw = rp.get_current(raw=True)
raw2 = rp.get_current(raw=True)
assert data_id(raw) != data_id(raw2)
class TestMutableIndexRollingPanel(unittest.TestCase):
def test_basics(self, window=10):
items = ['bar', 'baz', 'foo']
minor = ['A', 'B', 'C', 'D']
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=30, tz='utc')
major_deque = deque(maxlen=window)
frames = {}
for i, date in enumerate(dates):
frame = pd.DataFrame(np.random.randn(3, 4), index=items,
columns=minor)
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
result = rp.get_current()
expected = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
tm.assert_panel_equal(result, expected.swapaxes(0, 1))
def test_adding_and_dropping_items(self, n_items=5, n_minor=10, window=10,
periods=30):
np.random.seed(123)
items = deque(range(n_items))
minor = deque(range(n_minor))
expected_items = deque(range(n_items))
expected_minor = deque(range(n_minor))
first_non_existent = max(n_items, n_minor) + 1
# We want to add new columns in random order
add_items = np.arange(first_non_existent, first_non_existent + periods)
np.random.shuffle(add_items)
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
frames = {}
expected_frames = deque(maxlen=window)
expected_dates = deque()
for i, (date, add_item) in enumerate(zip(dates, add_items)):
frame = pd.DataFrame(np.random.randn(n_items, n_minor),
index=items, columns=minor)
if i >= window:
# Old labels and dates should start to get dropped at every
# call
del frames[expected_dates.popleft()]
expected_minor.popleft()
expected_items.popleft()
expected_frames.append(frame)
expected_dates.append(date)
rp.add_frame(date, frame)
frames[date] = frame
result = rp.get_current()
np.testing.assert_array_equal(sorted(result.minor_axis.values),
sorted(expected_minor))
np.testing.assert_array_equal(sorted(result.items.values),
sorted(expected_items))
tm.assert_frame_equal(frame.T,
result.ix[frame.index, -1, frame.columns])
expected_result = pd.Panel(frames).swapaxes(0, 1)
tm.assert_panel_equal(expected_result,
result)
# Insert new items
minor.popleft()
minor.append(add_item)
items.popleft()
items.append(add_item)
expected_minor.append(add_item)
expected_items.append(add_item)
| apache-2.0 |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/sparse/panel.py | 9 | 18717 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import warnings
from pandas.compat import range, lrange, zip
from pandas import compat
import numpy as np
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.sparse.frame import SparseDataFrame
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.ops as ops
class SparsePanelAxis(object):
def __init__(self, cache_field, frame_attr):
self.cache_field = cache_field
self.frame_attr = frame_attr
def __get__(self, obj, type=None):
return getattr(obj, self.cache_field, None)
def __set__(self, obj, value):
value = _ensure_index(value)
if isinstance(value, MultiIndex):
raise NotImplementedError("value cannot be a MultiIndex")
for v in compat.itervalues(obj._frames):
setattr(v, self.frame_attr, value)
setattr(obj, self.cache_field, value)
class SparsePanel(Panel):
"""
Sparse version of Panel
Parameters
----------
frames : dict of DataFrame objects
items : array-like
major_axis : array-like
minor_axis : array-like
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
Notes
-----
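Examples
--------
A minimal, hypothetical sketch (the frames below are made up, and the
constructor emits a deprecation warning)::

    >>> import numpy as np
    >>> import pandas as pd
    >>> frames = {'a': pd.DataFrame(np.random.randn(4, 3)),
    ...           'b': pd.DataFrame(np.random.randn(4, 3))}
    >>> sp = SparsePanel(frames)   # doctest: +SKIP
    >>> sp.to_dense()              # doctest: +SKIP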
"""
ndim = 3
_typ = 'panel'
_subtyp = 'sparse_panel'
def __init__(self, frames=None, items=None, major_axis=None, minor_axis=None,
default_fill_value=np.nan, default_kind='block',
copy=False):
# deprecation #11157
warnings.warn("SparsePanel is deprecated and will be removed in a future version",
FutureWarning, stacklevel=2)
if frames is None:
frames = {}
if isinstance(frames, np.ndarray):
new_frames = {}
for item, vals in zip(items, frames):
new_frames[item] = \
SparseDataFrame(vals, index=major_axis,
columns=minor_axis,
default_fill_value=default_fill_value,
default_kind=default_kind)
frames = new_frames
if not isinstance(frames, dict):
raise TypeError('input must be a dict, a %r was passed' %
type(frames).__name__)
self.default_fill_value = fill_value = default_fill_value
self.default_kind = kind = default_kind
# pre-filter, if necessary
if items is None:
items = Index(sorted(frames.keys()))
items = _ensure_index(items)
(clean_frames,
major_axis,
minor_axis) = _convert_frames(frames, major_axis,
minor_axis, kind=kind,
fill_value=fill_value)
self._frames = clean_frames
# do we want to fill missing ones?
for item in items:
if item not in clean_frames:
raise ValueError('column %r not found in data' % item)
self._items = items
self.major_axis = major_axis
self.minor_axis = minor_axis
def _consolidate_inplace(self): # pragma: no cover
# do nothing when DataFrame calls this method
pass
def __array_wrap__(self, result):
return SparsePanel(result, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
@classmethod
def from_dict(cls, data):
"""
Analogous to Panel.from_dict
"""
return SparsePanel(data)
def to_dense(self):
"""
Convert SparsePanel to (dense) Panel
Returns
-------
dense : Panel
"""
return Panel(self.values, self.items, self.major_axis,
self.minor_axis)
def as_matrix(self):
return self.values
@property
def values(self):
# return dense values
return np.array([self._frames[item].values
for item in self.items])
# need a special property for items to make the field assignable
_items = None
def _get_items(self):
return self._items
def _set_items(self, new_items):
new_items = _ensure_index(new_items)
if isinstance(new_items, MultiIndex):
raise NotImplementedError("itemps cannot be a MultiIndex")
# need to create new frames dict
old_frame_dict = self._frames
old_items = self._items
self._frames = dict((new_k, old_frame_dict[old_k])
for new_k, old_k in zip(new_items, old_items))
self._items = new_items
items = property(fget=_get_items, fset=_set_items)
# DataFrame's index
major_axis = SparsePanelAxis('_major_axis', 'index')
# DataFrame's columns / "items"
minor_axis = SparsePanelAxis('_minor_axis', 'columns')
def _ixs(self, i, axis=0):
"""
for compat as we don't support Block Manager here
i : int, slice, or sequence of integers
axis : int
"""
key = self._get_axis(axis)[i]
# xs cannot handle a non-scalar key, so just reindex here
if com.is_list_like(key):
return self.reindex(**{self._get_axis_name(axis): key})
return self.xs(key, axis=axis)
def _slice(self, slobj, axis=0, kind=None):
"""
for compat as we don't support Block Manager here
"""
axis = self._get_axis_name(axis)
index = self._get_axis(axis)
return self.reindex(**{axis: index[slobj]})
def _get_item_cache(self, key):
return self._frames[key]
def __setitem__(self, key, value):
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis,
columns=self.minor_axis)
if not isinstance(value, SparseDataFrame):
value = value.to_sparse(fill_value=self.default_fill_value,
kind=self.default_kind)
else:
raise ValueError('only DataFrame objects can be set currently')
self._frames[key] = value
if key not in self.items:
self._items = Index(list(self.items) + [key])
def set_value(self, item, major, minor, value):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Panel
Returns
-------
panel : SparsePanel
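Examples
--------
A hedged sketch with hypothetical labels (``sp`` and the axis labels are
assumed to exist)::

    >>> new_sp = sp.set_value('item_a', sp.major_axis[0],
    ...                       sp.minor_axis[0], 1.5)  # doctest: +SKIP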
"""
dense = self.to_dense().set_value(item, major, minor, value)
return dense.to_sparse(kind=self.default_kind,
fill_value=self.default_fill_value)
def __delitem__(self, key):
loc = self.items.get_loc(key)
indices = lrange(loc) + lrange(loc + 1, len(self.items))
del self._frames[key]
self._items = self._items.take(indices)
def __getstate__(self):
# pickling
return (self._frames, com._pickle_array(self.items),
com._pickle_array(self.major_axis),
com._pickle_array(self.minor_axis),
self.default_fill_value, self.default_kind)
def __setstate__(self, state):
frames, items, major, minor, fv, kind = state
self.default_fill_value = fv
self.default_kind = kind
self._items = _ensure_index(com._unpickle_array(items))
self._major_axis = _ensure_index(com._unpickle_array(major))
self._minor_axis = _ensure_index(com._unpickle_array(minor))
self._frames = frames
def copy(self, deep=True):
"""
Make a copy of the sparse panel
Returns
-------
copy : SparsePanel
"""
d = self._construct_axes_dict()
if deep:
new_data = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(self._frames))
d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d))
else:
new_data = self._frames.copy()
d['default_fill_value'] = self.default_fill_value
d['default_kind'] = self.default_kind
return SparsePanel(new_data, **d)
def to_frame(self, filter_observations=True):
"""
Convert SparsePanel to (dense) DataFrame
Returns
-------
frame : DataFrame
"""
if not filter_observations:
raise TypeError('filter_observations=False not supported for '
'SparsePanel.to_long')
I, N, K = self.shape
counts = np.zeros(N * K, dtype=int)
d_values = {}
d_indexer = {}
for item in self.items:
frame = self[item]
values, major, minor = _stack_sparse_info(frame)
# values are stacked column-major
indexer = minor * N + major
counts.put(indexer, counts.take(indexer) + 1) # cuteness
d_values[item] = values
d_indexer[item] = indexer
# have full set of observations for each item
mask = counts == I
# for each item, take mask values at index locations for those sparse
# values, and use that to select values
values = np.column_stack([d_values[item][mask.take(d_indexer[item])]
for item in self.items])
inds, = mask.nonzero()
# still column major
major_labels = inds % N
minor_labels = inds // N
index = MultiIndex(levels=[self.major_axis, self.minor_axis],
labels=[major_labels, minor_labels],
verify_integrity=False)
df = DataFrame(values, index=index, columns=self.items)
return df.sortlevel(level=0)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def reindex(self, major=None, items=None, minor=None, major_axis=None,
minor_axis=None, copy=False):
"""
Conform / reshape panel axis labels to new input labels
Parameters
----------
major : array-like, default None
items : array-like, default None
minor : array-like, default None
copy : boolean, default False
Copy underlying SparseDataFrame objects
Returns
-------
reindexed : SparsePanel
"""
major = com._mut_exclusive(major=major, major_axis=major_axis)
minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis)
if com._all_none(items, major, minor):
raise ValueError('Must specify at least one axis')
major = self.major_axis if major is None else major
minor = self.minor_axis if minor is None else minor
if items is not None:
new_frames = {}
for item in items:
if item in self._frames:
new_frames[item] = self._frames[item]
else:
raise NotImplementedError('Reindexing with new items not yet '
'supported')
else:
new_frames = self._frames
if copy:
new_frames = dict((k, v.copy()) for k, v in compat.iteritems(new_frames))
return SparsePanel(new_frames, items=items,
major_axis=major,
minor_axis=minor,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
def _combine(self, other, func, axis=0):
if isinstance(other, DataFrame):
return self._combineFrame(other, func, axis=axis)
elif isinstance(other, Panel):
return self._combinePanel(other, func)
elif np.isscalar(other):
new_frames = dict((k, func(v, other))
for k, v in compat.iteritems(self))
return self._new_like(new_frames)
def _combineFrame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
# TODO: make faster!
new_frames = {}
for item, item_slice in zip(self.items, new_values):
old_frame = self[item]
ofv = old_frame.default_fill_value
ok = old_frame.default_kind
new_frames[item] = SparseDataFrame(item_slice,
index=self.major_axis,
columns=self.minor_axis,
default_fill_value=ofv,
default_kind=ok)
return self._new_like(new_frames)
def _new_like(self, new_frames):
return SparsePanel(new_frames, self.items, self.major_axis,
self.minor_axis,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
def _combinePanel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
new_frames = {}
for item in items:
new_frames[item] = func(this[item], other[item])
if not isinstance(other, SparsePanel):
new_default_fill = self.default_fill_value
else:
# maybe unnecessary
new_default_fill = func(self.default_fill_value,
other.default_fill_value)
return SparsePanel(new_frames, items, major, minor,
default_fill_value=new_default_fill,
default_kind=self.default_kind)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
"""
slices = dict((k, v.xs(key)) for k, v in compat.iteritems(self))
return DataFrame(slices, index=self.minor_axis, columns=self.items)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : SparseDataFrame
index -> major axis, columns -> items
"""
slices = dict((k, v[key]) for k, v in compat.iteritems(self))
return SparseDataFrame(slices, index=self.major_axis,
columns=self.items,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
# TODO: allow SparsePanel to work with flex arithmetic.
# pow and mod only work for scalars for now
def pow(self, val, *args, **kwargs):
"""wrapper around `__pow__` (only works for scalar values)"""
return self.__pow__(val)
def mod(self, val, *args, **kwargs):
"""wrapper around `__mod__` (only works for scalar values"""
return self.__mod__(val)
# Sparse objects opt out of numexpr
SparsePanel._add_aggregate_operations(use_numexpr=False)
ops.add_special_arithmetic_methods(SparsePanel, use_numexpr=False, **ops.panel_special_funcs)
SparseWidePanel = SparsePanel
def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'):
from pandas.core.panel import _get_combined_index
output = {}
for item, df in compat.iteritems(frames):
if not isinstance(df, SparseDataFrame):
df = SparseDataFrame(df, default_kind=kind,
default_fill_value=fill_value)
output[item] = df
if index is None:
all_indexes = [df.index for df in output.values()]
index = _get_combined_index(all_indexes)
if columns is None:
all_columns = [df.columns for df in output.values()]
columns = _get_combined_index(all_columns)
index = _ensure_index(index)
columns = _ensure_index(columns)
for item, df in compat.iteritems(output):
if not (df.index.equals(index) and df.columns.equals(columns)):
output[item] = df.reindex(index=index, columns=columns)
return output, index, columns
def _stack_sparse_info(frame):
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
for col in frame.columns:
series = frame[col]
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
sparse_values = np.concatenate(vals_to_concat)
return sparse_values, major_labels, minor_labels
| artistic-2.0 |