repo_name (stringlengths 6-92) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 821-753k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
JonasWallin/BayesFlow | examples/article1/article_simulatedata.py | 1 | 5285 | '''
Created on Aug 10, 2014
@author: jonaswallin
'''
from __future__ import print_function, division
import sys
import numpy as np
import numpy.random as npr
import scipy.linalg as spl
import BayesFlow.PurePython.GMM as GMM
import BayesFlow.PurePython.distribution.wishart as wishart
import BayesFlow.utils as util
def simulate_data_v2(n_cells, n_persons, seed = None, silent = True):
"""
Simulate a larger data set for the article.
"""
sigmas = np.load('covs_.npy')
thetas = np.load('means_.npy')
weights = np.load('weights_.npy')
weights /= np.sum(weights)
if not silent:
print("preprocsseing sigma:", end = '')
sys.stdout.flush()
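# For each cluster, build a between-person covariance for its latent mean:
# the random perturbation is scaled by the smallest eigenvalue of the cluster
# covariance, so clusters that are tight in the data also vary little across persons.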
sigma_theta = []
for sigma in sigmas:
var_ = np.sort(np.linalg.eig(sigma)[0])
z_sigma = var_[0] * npr.randn(*sigma.shape)
sigma_theta.append( 0.1*(sigma + np.dot(z_sigma.T,z_sigma)) )
if not silent:
print("done")
sys.stdout.flush()
nu = 100
ratio_act = np.array([ 1., 0.5, 0.5, 0.5 , 0.5, 1, 1,
1, 1 , 1, 1, 1])
ratio_act = np.ones(ratio_act.shape) #warning warning test
Y, act_Class, mus, x = simulate_data_( thetas = thetas,
sigma_theta = sigma_theta,
sigmas = sigmas,
weights = weights,
nu = nu,
ratio_act = ratio_act,
n_cells = n_cells,
n_persons = n_persons,
seed = seed,
silent = silent)
return Y, act_Class, mus , thetas, sigmas, weights, x
def simulate_data_v1(nCells = 5*10**4, nPersons = 40, seed = 123456, ratio_P = [1., 1., 0.8, 0.1]):
"""
Simulate the data following the instructions presented in the article.
"""
if seed is not None:
npr.seed(seed)
P = [0.49, 0.3, 0.2 , 0.01 ]
Thetas = [np.array([0.,0, 0]), np.array([0, -2, 1]), np.array([1., 2, 0]), np.array([-2,2,1.5])]
Z_Sigma = [np.array([[1.27, 0.25, 0],[0.25, 0.27, -0.001],[0., -0.001, 0.001]]),
np.array([[0.06, 0.04, -0.03],[0.04, 0.05, 0],[-0.03, 0., 0.09]]),
np.array([[0.44, 0.08, 0.08],[0.08, 0.16, 0],[0.08, 0., 0.16]]),
0.01*np.eye(3)]
Sigmas = [0.1*np.eye(3), 0.1*spl.toeplitz([2.,0.5,0]),0.1* spl.toeplitz([2.,-0.5,1]),
0.1*spl.toeplitz([1.,.3,.3]) ]
nu = 100
Y, act_Class, mus, x = simulate_data_(Thetas, Z_Sigma, Sigmas, P, nu = nu, ratio_act = ratio_P, n_cells = nCells, n_persons = nPersons,
seed = seed)
return Y, act_Class, mus, Thetas, Sigmas, P
def simulate_data_( thetas, sigma_theta, sigmas, weights, nu = 100, ratio_act = None, n_cells = 5*10**4, n_persons = 40,
seed = None, silent = True):
"""
Simulate data given:
*thetas* list of latent means
*sigma_theta* variation between the means
*sigmas* list of latent covariances
*weights* list of probabilities
*nu* inverse Wishart parameter
*ratio_act* probability that the cluster is active for a person
*n_cells* number of cells per person
*n_persons* number of persons
*seed* seed for the random number generator
"""
if seed is not None:
npr.seed(seed)
K = len(weights)
dim = thetas[0].shape[0]
if ratio_act is None:
ratio_act = np.ones(K )
act_class = np.zeros((n_persons, K))
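# act_class[i, j] == 1 means cluster j is present for person i; for each
# cluster, only the first ceil(n_persons * ratio_act[j]) persons have it active.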
for i in range(K):
act_class[:np.int(np.ceil(n_persons * ratio_act[i])), i] = 1.
Y = []
x = []
nu = 100
mus = []
for i in range(n_persons):
if not silent:
print("setting up person_{i}: ".format(i = i),end = '')
sys.stdout.flush()
mix_obj = GMM.mixture(K = np.int(np.sum(act_class[i, :])))
theta_temp = []
sigma_temp = []
for j in range(K):
if act_class[i, j] == 1:
theta_temp.append(thetas[j] + util.rmvn( np.zeros((dim, 1)), sigma_theta[j] ))
sigma_temp.append(wishart.invwishartrand(nu, (nu - dim - 1) * sigmas[j]))
else:
theta_temp.append(np.ones(dim) * np.NAN)
sigma_temp.append(np.ones((dim,dim)) * np.NAN)
theta_temp_ = [ theta_temp[aC] for aC in np.where(act_class[i, :] == 1)[0]]
sigma_temp_ = [ sigma_temp[aC] for aC in np.where(act_class[i, :] == 1)[0]]
mix_obj.mu = theta_temp_
mus.append(theta_temp)
mix_obj.sigma = sigma_temp_
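# Person-specific mixture weights: jitter the population weights by a factor
# drawn uniformly from [0.9, 1.1], then renormalise.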
p_ = np.array([ (0.2*np.random.rand()+0.9) * weights[aC] for aC in np.where(act_class[i, :] == 1)[0]] )
p_ /= np.sum(p_)
mix_obj.p = p_
mix_obj.d = dim
#Y_, x_ = mix_obj.simulate_data2(np.int(np.floor(0.99*n_cells)))
Y_, x_ = mix_obj.simulate_data2(n_cells)
noise_variance = np.eye(mix_obj.d)
np.fill_diagonal(noise_variance, np.var(Y_,0))
#Y_noise = npr.multivariate_normal(np.mean(Y_,0), noise_variance, size = np.int(np.ceil(0.01*n_cells)))
#Y_ = np.vstack((Y_,Y_noise))
#np.random.shuffle(Y_)
Y.append(Y_)
x.append(x_)
if not silent:
print("done")
sys.stdout.flush()
mus = np.array(mus)
return Y, act_class, mus.T, x
if __name__ == "__main__":
import matplotlib.pyplot as plt
n_persons = 20
seed = 123456
n_cells = 10**4
Y, act_Class, mus, thetas, sigmas, weights, x = simulate_data_v2(n_cells = n_cells, n_persons = n_persons, seed = seed)
K = len(thetas[:,0])
color = plt.cm.rainbow(np.linspace(0,1,K))
for k in range(4):
plt.figure()
for j in range(mus.shape[1]):
plt.text(thetas[j ,2*k], thetas[j ,2*k + 1], str(j), color = color[j], fontsize=14)
for i in range(n_persons):
for j in range(mus.shape[1]):
plt.scatter(mus[2*k,j,i], mus[2*k + 1,j,i], color = color[j], s=4 )
plt.show()
| gpl-2.0 |
thientu/scikit-learn | sklearn/tests/test_naive_bayes.py | 70 | 17509 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non-regression test to make sure that any further refactoring / optimisation
# of the NB models does not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
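# i.e. P(feature present | class) = (N_{feature,class} + 1) / (N_class + 2):
# Laplace smoothing with alpha = 1 over the two outcomes present / absent.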
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
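# Worked example for the China class (prior 0.75, feature probs above, features
# ordered Beijing, Chinese, Japan, Macao, Shanghai, Tokyo):
# 0.75 * (1 - 0.4) * 0.8 * 0.2 * (1 - 0.4) * (1 - 0.4) * 0.2 = 0.005184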
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ycasg/PyNLO | config.py | 2 | 6254 | # -*- coding: utf-8 -*-
import sys, os
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
templates_path = ['/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx', 'templates', '_templates', '.templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'PyNLO'
copyright = u''
version = 'latest'
release = 'latest'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
htmlhelp_basename = 'pynlo'
file_insertion_enabled = False
latex_documents = [
('index', 'pynlo.tex', u'PyNLO Documentation',
u'', 'manual'),
]
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
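# Read the Docs builds do not install the heavy numeric dependencies, so when
# building there the modules listed below are replaced by MagicMock stand-ins,
# letting autodoc import the package without pyfftw/scipy/numpy/matplotlib.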
if on_rtd:
if sys.version[0] == '3': # Python 3
from unittest.mock import MagicMock
elif sys.version[0] == '2': # Python 2
from mock import Mock as MagicMock
else:
raise ImportError("Don't know how to import MagicMock.")
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pyfftw', 'scipy', 'numpy', 'matplotlib', 'matplotlib.pyplot']
print "Mocking ", MOCK_MODULES
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
###########################################################################
# auto-created readthedocs.org specific configuration #
###########################################################################
#
# The following code was added during an automated build on readthedocs.org
# It is auto created and injected for every build. The result is based on the
# conf.py.tmpl file found in the readthedocs.org codebase:
# https://github.com/rtfd/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl
#
import sys
import os.path
from six import string_types
from sphinx import version_info
from recommonmark.parser import CommonMarkParser
# Only Sphinx 1.3+
if version_info[0] == 1 and version_info[1] > 2:
# Markdown Support
if 'source_suffix' in globals():
if isinstance(source_suffix, string_types) and source_suffix != '.md':
source_suffix = [source_suffix, '.md']
elif '.md' not in source_suffix:
source_suffix.append('.md')
else:
source_suffix = ['.rst', '.md']
if 'source_parsers' in globals():
if '.md' not in source_parsers:
source_parsers['.md'] = CommonMarkParser
else:
source_parsers = {
'.md': CommonMarkParser,
}
if globals().get('source_suffix', False):
if isinstance(source_suffix, string_types):
SUFFIX = source_suffix
else:
SUFFIX = source_suffix[0]
else:
SUFFIX = '.rst'
#Add RTD Template Path.
if 'templates_path' in globals():
templates_path.insert(0, '/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx')
else:
templates_path = ['/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx', 'templates', '_templates',
'.templates']
# Add RTD Static Path. Add to the end because it overwrites previous files.
if not 'html_static_path' in globals():
html_static_path = []
if os.path.exists('_static'):
html_static_path.append('_static')
html_static_path.append('/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx/_static')
# Add RTD Theme only if they aren't overriding it already
using_rtd_theme = False
if 'html_theme' in globals():
if html_theme in ['default']:
# Allow people to bail with a hack of having an html_style
if not 'html_style' in globals():
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_style = None
html_theme_options = {}
if 'html_theme_path' in globals():
html_theme_path.append(sphinx_rtd_theme.get_html_theme_path())
else:
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
using_rtd_theme = True
else:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_style = None
html_theme_options = {}
if 'html_theme_path' in globals():
html_theme_path.append(sphinx_rtd_theme.get_html_theme_path())
else:
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
using_rtd_theme = True
# Force theme on setting
if globals().get('RTD_NEW_THEME', False):
html_theme = 'sphinx_rtd_theme'
html_style = None
html_theme_options = {}
using_rtd_theme = True
if globals().get('RTD_OLD_THEME', False):
html_style = 'rtd.css'
html_theme = 'default'
if globals().get('websupport2_base_url', False):
websupport2_base_url = 'https://readthedocs.org//websupport'
if 'http' not in settings.MEDIA_URL:
websupport2_static_url = 'https://media.readthedocs.org/static/'
else:
websupport2_static_url = 'https://media.readthedocs.org//static'
#Add project information to the template context.
context = {
'using_theme': using_rtd_theme,
'html_theme': html_theme,
'current_version': "latest",
'MEDIA_URL': "https://media.readthedocs.org/",
'PRODUCTION_DOMAIN': "readthedocs.org",
'versions': [
("latest", "/en/latest/"),
],
'downloads': [
],
'slug': 'pynlo',
'name': u'PyNLO',
'rtd_language': u'en',
'canonical_url': 'http://pynlo.readthedocs.org/en/latest/',
'analytics_code': '',
'single_version': False,
'conf_py_path': '/./',
'api_host': 'https://readthedocs.org/',
'github_user': 'ycasg',
'github_repo': 'PyNLO',
'github_version': 'master',
'display_github': True,
'bitbucket_user': 'None',
'bitbucket_repo': 'None',
'bitbucket_version': 'master',
'display_bitbucket': False,
'READTHEDOCS': True,
'using_theme': (html_theme == "default"),
'new_theme': (html_theme == "sphinx_rtd_theme"),
'source_suffix': SUFFIX,
'user_analytics_code': '',
'global_analytics_code': 'UA-17997319-1',
'commit': '180d51c6',
}
if 'html_context' in globals():
html_context.update(context)
else:
html_context = context
# Add custom RTD extension
if 'extensions' in globals():
extensions.append("readthedocs_ext.readthedocs")
else:
extensions = ["readthedocs_ext.readthedocs"]
| gpl-3.0 |
annapowellsmith/openpresc | openprescribing/frontend/management/commands/import_ppu_savings.py | 1 | 9942 | import argparse
import os
import pandas as pd
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from gcutils.bigquery import Client
from common.utils import valid_date
from dmd.models import DMDProduct
from frontend.models import ImportLog
from frontend.models import PPUSaving
from frontend.models import Presentation
from frontend.bq_schemas import PPU_SAVING_SCHEMA, ppu_savings_transform
SUBSTITUTIONS_SPREADSHEET = (
'https://docs.google.com/spreadsheets/d/e/'
'2PACX-1vSsTrjEdRekkcR0H8myL8RwP3XKg2YvTgQwGb5ypNei0IYn4ofr'
'ayVZJibLfN_lnpm6Q9qu_t0yXU5Z/pub?gid=1784930737&single=true'
'&output=csv')
def make_merged_table_for_month(month):
"""Create a new BigQuery table that includes code substitutions, off
which our savings can be computed.
What are code substitutions?
Because (for example) Tramadol tablets and capsules can
almost always be substituted, we consider them the same chemical
for the purposes of our analysis.
Therefore, wherever Tramadol capsules appear in the source data,
we treat them as Tramadol tablets (for example).
The mapping of what we consider equivalent is stored in a Google
Sheet, currently at
https://docs.google.com/spreadsheets/d/1SvMGCKrmqsNkZYuGW18Sf0wTluXyV4bhyZQaVLcO41c/edit
The process of updating this spreadsheet (which should be done
periodically) is documented
[here](https://github.com/ebmdatalab/price-per-dose/issues/11)
"""
cases = []
seen = set()
df = pd.read_csv(SUBSTITUTIONS_SPREADSHEET)
df = df[df['Really equivalent?'] == 'Y']
for row in df.iterrows():
data = row[1]
source_code = data[1].strip()
code_to_merge = data[10].strip()
if source_code not in seen and code_to_merge not in seen:
cases.append((code_to_merge, source_code))
seen.add(source_code)
seen.add(code_to_merge)
prescribing_table = 'normalised_prescribing_standard'
sql = """
SELECT
practice,
pct,
CASE bnf_code
%s
ELSE bnf_code
END AS bnf_code,
month,
actual_cost,
net_cost,
quantity
FROM
{hscic}.%s
WHERE month = TIMESTAMP('%s')
""" % (' '.join(
["WHEN '%s' THEN '%s'" % (when_code, then_code)
for (when_code, then_code) in cases]),
prescribing_table,
month)
target_table_name = (
'prescribing_with_merged_codes_%s' % month.strftime('%Y_%m'))
client = Client('hscic')
table = client.get_table(target_table_name)
table.insert_rows_from_query(sql)
return target_table_name
def get_savings(entity_type, month):
"""Execute SQL to calculate savings in BigQuery, and return as a
DataFrame.
References to issues below are for
https://github.com/ebmdatalab/price-per-dose/issues
"""
prescribing_table = "{hscic}.%s" % (
make_merged_table_for_month(month)
)
# This is interpolated into the SQL template as it is used multiple times.
restricting_condition = (
"AND LENGTH(RTRIM(p.bnf_code)) >= 15 "
"AND p.bnf_code NOT LIKE '0302000C0____BE' " # issue #10
"AND p.bnf_code NOT LIKE '0302000C0____BF' " # issue #10
"AND p.bnf_code NOT LIKE '0302000C0____BH' " # issue #10
"AND p.bnf_code NOT LIKE '0302000C0____BG' " # issue #10
"AND p.bnf_code NOT LIKE '0904010H0%' " # issue #9
"AND p.bnf_code NOT LIKE '0904010H0%' " # issue #9
"AND p.bnf_code NOT LIKE '1311070S0____AA' " # issue #9
"AND p.bnf_code NOT LIKE '1311020L0____BS' " # issue #9
"AND p.bnf_code NOT LIKE '0301020S0____AA' " # issue #12
"AND p.bnf_code NOT LIKE '190700000BBCJA0' " # issue #12
"AND p.bnf_code NOT LIKE '0604011L0BGAAAH' " # issue #12
"AND p.bnf_code NOT LIKE '1502010J0____BY' " # issue #12
"AND p.bnf_code NOT LIKE '1201010F0AAAAAA' " # issue #12
"AND p.bnf_code NOT LIKE '0107010S0AAAGAG' " # issue #12
"AND p.bnf_code NOT LIKE '060016000BBAAA0' " # issue #14
"AND p.bnf_code NOT LIKE '190201000AABJBJ' " # issue #14
"AND p.bnf_code NOT LIKE '190201000AABKBK' " # issue #14
"AND p.bnf_code NOT LIKE '190201000AABLBL' " # issue #14
"AND p.bnf_code NOT LIKE '190201000AABMBM' " # issue #14
"AND p.bnf_code NOT LIKE '190201000AABNBN' " # issue #14
"AND p.bnf_code NOT LIKE '190202000AAADAD' " # issue #14
)
# Generate variable SQL based on if we're interested in CCG or
# practice-level data
if entity_type == 'pct':
select = 'savings.presentations.pct AS pct,'
inner_select = 'presentations.pct, '
group_by = 'presentations.pct, '
min_saving = 1000
elif entity_type == 'practice':
select = ('savings.presentations.practice AS practice,'
'savings.presentations.pct AS pct,')
inner_select = ('presentations.pct, '
'presentations.practice,')
group_by = ('presentations.practice, '
'presentations.pct,')
min_saving = 50
else:
# 7d21f9c6 (#769) removed 'product'` as a possible entity_type. We may
# want to revisit this.
assert False
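# min_saving is interpolated into the SQL template below, presumably as a
# lower bound on reported savings: 1000 (GBP) per CCG vs 50 per practice.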
fpath = os.path.dirname(__file__)
# Execute SQL
with open("%s/ppu_sql/savings_for_decile.sql" % fpath, "r") as f:
sql = f.read()
substitutions = (
('{{ restricting_condition }}', restricting_condition),
('{{ month }}', month.strftime('%Y-%m-%d')),
('{{ group_by }}', group_by),
('{{ select }}', select),
('{{ prescribing_table }}', prescribing_table),
('{{ inner_select }}', inner_select),
('{{ min_saving }}', min_saving)
)
for key, value in substitutions:
sql = sql.replace(key, str(value))
# Format results in a DataFrame
client = Client()
df = client.query_into_dataframe(sql, legacy=True)
# Rename null values in category, so we can group by it
df.loc[df['category'].isnull(), 'category'] = 'NP8'
df = df.set_index(
'generic_presentation')
df.index.name = 'bnf_code'
# Add in substitutions column
subs = pd.read_csv(SUBSTITUTIONS_SPREADSHEET).set_index('Code')
subs = subs[subs['Really equivalent?'] == 'Y'].copy()
subs['formulation_swap'] = (
subs['Formulation'] +
' / ' +
subs['Alternative formulation'])
df = df.join(
subs[['formulation_swap']], how='left')
# Convert nans to Nones
df = df.where((pd.notnull(df)), None)
return df
class Command(BaseCommand):
args = ''
help = 'Imports cost savings for a month'
def add_arguments(self, parser):
parser.add_argument(
'--month',
type=valid_date)
def handle(self, *args, **options):
'''
Compute and store cost savings for the specified month.
Deletes any existing data for that month.
'''
if not options['month']:
last_prescribing = ImportLog.objects.latest_in_category(
'prescribing').current_at
options['month'] = last_prescribing
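# Refuse to (re)compute a month that is not newer than the latest recorded
# PPU import.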
log = ImportLog.objects.latest_in_category('ppu')
if log is not None:
if options['month'] <= log.current_at:
raise argparse.ArgumentTypeError("Couldn't infer date")
with transaction.atomic():
# Create custom DMD Products for our overrides, if they
# don't exist.
DMDProduct.objects.get_or_create(
dmdid=10000000000,
bnf_code='0601060D0AAA0A0',
vpid=10000000000,
name='Glucose Blood Testing Reagents',
concept_class=1,
product_type=1
)
Presentation.objects.get_or_create(
bnf_code='0601060D0AAA0A0',
name='Glucose Blood Testing Reagents',
is_generic=True)
DMDProduct.objects.get_or_create(
dmdid=10000000001,
vpid=10000000001,
bnf_code='0601060U0AAA0A0',
name='Urine Testing Reagents',
product_type=1,
concept_class=1)
Presentation.objects.get_or_create(
bnf_code='0601060U0AAA0A0',
name='Urine Testing Reagents',
is_generic=True)
PPUSaving.objects.filter(date=options['month']).delete()
for entity_type in ['pct', 'practice']:
result = get_savings(entity_type, options['month'])
for row in result.itertuples():
d = row._asdict()
if d['price_per_unit']:
PPUSaving.objects.create(
date=options['month'],
presentation_id=d['Index'],
lowest_decile=d['lowest_decile'],
quantity=d['quantity'],
price_per_unit=d['price_per_unit'],
possible_savings=d['possible_savings'],
formulation_swap=d['formulation_swap'] or None,
pct_id=d.get('pct', None),
practice_id=d.get('practice', None)
)
ImportLog.objects.create(
category='ppu',
filename='n/a',
current_at=options['month'])
client = Client('hscic')
table = client.get_or_create_table('ppu_savings', PPU_SAVING_SCHEMA)
columns = [field.name for field in PPU_SAVING_SCHEMA]
table.insert_rows_from_pg(
PPUSaving,
columns,
ppu_savings_transform
)
| mit |
jmschrei/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
the13fools/Bokeh_Examples | glyphs/anscombe.py | 3 | 3251 | from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.glyphs import Circle, Line
from bokeh.objects import (
ColumnDataSource, Glyph, Grid, GridPlot, LinearAxis, Plot, Range1d
)
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
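# Anscombe's quartet: four x/y data sets with nearly identical summary
# statistics (means, variances, correlation, regression line) but very
# different shapes. Columns are interleaved as Ix, Iy, IIx, IIy, ..., IVx, IVy.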
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
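# Reference line shared by all four panels: the common least-squares fit is
# approximately y = 3 + 0.5 * x, drawn across the full x range.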
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(
x_range=xdr, y_range=ydr, data_sources=[lines_source, circles_source],
title=title, plot_width=400, plot_height=400, border_fill='white', background_fill='#e9e0db')
xaxis = LinearAxis(plot=plot, dimension=0, location="bottom", axis_line_color=None)
yaxis = LinearAxis(plot=plot, dimension=1, location="left", axis_line_color=None)
xgrid = Grid(plot=plot, dimension=0, axis=xaxis)
ygrid = Grid(plot=plot, dimension=1, axis=yaxis)
line_renderer = Glyph(
data_source = lines_source,
xdata_range = xdr,
ydata_range = ydr,
glyph = Line(x='x', y='y', line_color="#666699", line_width=2),
)
plot.renderers.append(line_renderer)
circle_renderer = Glyph(
data_source = circles_source,
xdata_range = xdr,
ydata_range = ydr,
glyph = Circle(x=xname, y=yname, size=12, fill_color="#cc6633",
line_color="#cc6633", fill_alpha=0.5),
)
plot.renderers.append(circle_renderer)
return plot
#where will this comment show up
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = GridPlot(children=[[I, II], [III, IV]], plot_width=800)
doc = Document( )
doc.add(grid)
if __name__ == "__main__":
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
pianomania/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 55 | 9939 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import (
assert_almost_equal, assert_greater, assert_less, raises,
)
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
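# With n_subsamples == 2 the breakdown point tends to 1 - 1/sqrt(2)
# (about 29.3%) as the number of samples grows.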
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/cross_decomposition/pls_.py | 9 | 31346 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import pinv2, svd
from scipy.sparse.linalg import svds
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..utils.extmath import svd_flip
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
from ..exceptions import ConvergenceWarning
from ..externals import six
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
    Provides an alternative to svd(X'Y); returns the first left and right
    singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
# We use slower pinv2 (same as np.linalg.pinv) for stability
# reasons
X_pinv = pinv2(X, check_finite=False)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# If y_score only has zeros x_weights will only have zeros. In
# this case add an epsilon to converge to a more acceptable
# solution
if np.dot(x_weights.T, x_weights) < eps:
x_weights += eps
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = pinv2(Y, check_finite=False) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached',
ConvergenceWarning)
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
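# A brief illustration (an addition, not part of the original module) of the
# docstring claim above: for mode "A" the NIPALS inner loop and an SVD of X'Y
# should recover the same leading singular vectors. Kept as a comment-only
# sketch; the shapes are arbitrary and _svd_cross_product is the helper defined
# just below.
#
#   rng = np.random.RandomState(0)
#   X, Y = rng.randn(20, 5), rng.randn(20, 3)
#   u_nipals, v_nipals, _ = _nipals_twoblocks_inner_loop(X, Y, mode="A")
#   u_svd, v_svd = _svd_cross_product(X, Y)
#   # np.abs(u_nipals) should match np.abs(u_svd) to numerical precision;
#   # v_nipals matches v_svd only up to scale unless norm_y_weights=True.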
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weight vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS
        algorithm, or (b) an SVD on residual cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights : boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : int (default 500)
The maximum number of iterations
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_ : array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy,
ensure_min_samples=2)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# Forces sign stability of x_weights and y_weights
            # Sign indeterminacy issue from svd if algorithm == "svd"
# and from platform dependent computation if algorithm == 'nipals'
x_weights, y_weights = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction may be done here: in order
            # to avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted from Xk, we
            # suggest performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
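            # A minimal sketch (not executed) of the column-wise deflation
            # suggested above, which avoids materializing the rank-one
            # approximation of Xk:
            #   for col in range(Xk.shape[1]):
            #       Xk[:, col] -= x_scores[:, 0] * x_loadings[col, 0]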
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
check_finite=False))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
check_finite=False))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = self.coef_ * self.y_std_
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_ : array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining
PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`_
For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * std(Xk u) std(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(PLSRegression, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
Number of components to keep
scale : boolean, (default True)
Option to scale data
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS
<http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`_
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * std(Xk u) std(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, which is slightly different from CCA. It is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
super(PLSCanonical, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply performs an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy,
ensure_min_samples=2)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
        # all the components (C.shape[1]), we have to use another one. Otherwise,
        # let's use arpack to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = svd(C, full_matrices=False)
else:
U, s, V = svds(C, k=self.n_components)
# Deterministic output
U, V = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""
Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y).transform(X, y)
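if __name__ == "__main__":
    # A minimal usage sketch (an addition, not part of the library): PLSSVD has
    # no doctest above, so this mirrors the PLSRegression/PLSCanonical examples.
    # The toy data is an arbitrary choice for illustration.
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    plssvd = PLSSVD(n_components=2).fit(X, Y)
    X_scores, Y_scores = plssvd.transform(X, Y)
    print(X_scores.shape)  # expected: (4, 2)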
| bsd-3-clause |
belltailjp/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
    p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
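# A short follow-up sketch (not in the original example): the level-0 contour
# drawn above is the fitted hyperplane w0*x1 + w1*x2 + b = 0, which can be read
# directly from the estimator.
w_fit = clf.coef_.ravel()
b_fit = clf.intercept_[0]
print("decision boundary: %.3f * x1 + %.3f * x2 + %.3f = 0"
      % (w_fit[0], w_fit[1], b_fit))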
| bsd-3-clause |
netoaraujjo/hal | clustering/affinity_propagation.py | 1 | 1711 | #-*- coding: utf-8 -*-
from sklearn.cluster import AffinityPropagation as sk_AffinityPropagation
from .clustering import Clustering
class AffinityPropagation(Clustering):
"""docstring for AffinityPropagation."""
def __init__(self, data, damping = 0.5, max_iter = 200, convergence_iter = 15,
copy = True, preference = None, affinity = 'euclidean',
verbose = False):
        super(AffinityPropagation, self).__init__()
self.data = data
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.preference = preference
self.affinity = affinity
self.verbose = verbose
def execute(self):
"""Constroi o modelo de clusterizacao."""
self.model = sk_AffinityPropagation(damping = self.damping,
max_iter = self.max_iter,
convergence_iter = self.convergence_iter,
copy = self.copy,
preference = self.preference,
affinity = self.affinity,
verbose = self.verbose).fit(self.data)
self.clusters = super().make_clusters(self.data, self.model.labels_)
@property
def labels_(self):
"""Retorna os labels dos elementos do dataset."""
return self.model.labels_
@property
def clusters_(self):
"""Retorna um dicionaro onde os indices dos grupos sao as chaves."""
return self.clusters
@property
def model_(self):
"""Retorna o modelo de agrupamento."""
return self.model_
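if __name__ == "__main__":
    # Minimal usage sketch (an addition, not original code). It assumes the
    # module is run inside its package (e.g. via ``python -m``) so that the
    # relative import of Clustering above resolves; the random 2-D data is
    # purely illustrative.
    import numpy as np
    data = np.random.RandomState(0).rand(30, 2)
    ap = AffinityPropagation(data, damping=0.9)
    ap.execute()
    print(ap.labels_)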
| mit |
bthirion/nipy | examples/labs/need_data/histogram_fits.py | 4 | 2055 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
"""
Example of a script that performs histogram analysis of an activation image.
This is based on a real fMRI image.
Simply modify the input image path to make it work on your preferred image.
Needs matplotlib
Author : Bertrand Thirion, 2008-2009
"""
import os
import numpy as np
import scipy.stats as st
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nibabel import load
import nipy.algorithms.statistics.empirical_pvalue as en
# Local import
from get_data_light import DATA_DIR, get_second_level_dataset
# parameters
verbose = 1
theta = float(st.t.isf(0.01, 100))
# paths
mask_image = os.path.join(DATA_DIR, 'mask.nii.gz')
input_image = os.path.join(DATA_DIR, 'spmT_0029.nii.gz')
if (not os.path.exists(mask_image)) or (not os.path.exists(input_image)):
get_second_level_dataset()
# Read the mask
nim = load(mask_image)
mask = nim.get_data()
# read the functional image
rbeta = load(input_image)
beta = rbeta.get_data()
beta = beta[mask > 0]
mf = plt.figure(figsize=(13, 5))
a1 = plt.subplot(1, 3, 1)
a2 = plt.subplot(1, 3, 2)
a3 = plt.subplot(1, 3, 3)
# fit beta's histogram with a Gamma-Gaussian mixture
bfm = np.array([2.5, 3.0, 3.5, 4.0, 4.5])
bfp = en.gamma_gaussian_fit(beta, bfm, verbose=1, mpaxes=a1)
# fit beta's histogram with a mixture of Gaussians
alpha = 0.01
pstrength = 100
bfq = en.three_classes_GMM_fit(beta, bfm, alpha, pstrength,
verbose=1, mpaxes=a2)
# fit the null mode of beta with the robust method
efdr = en.NormalEmpiricalNull(beta)
efdr.learn()
efdr.plot(bar=0, mpaxes=a3)
a1.set_title('Fit of the density with \n a Gamma-Gaussian mixture')
a2.set_title('Fit of the density with \n a mixture of Gaussians')
a3.set_title('Robust fit of the density \n with a single Gaussian')
plt.show()
| bsd-3-clause |
khkaminska/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
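# A short follow-up sketch (not in the original example): if the best (C, gamma)
# reported above were to lie on an edge of the search grid, a finer grid centred
# on them could be built and searched in the same way.  The one-decade width and
# the 5 points per axis are arbitrary choices.
best_C, best_gamma = grid.best_params_['C'], grid.best_params_['gamma']
C_refined = np.logspace(np.log10(best_C) - 1, np.log10(best_C) + 1, 5)
gamma_refined = np.logspace(np.log10(best_gamma) - 1, np.log10(best_gamma) + 1, 5)
# GridSearchCV(SVC(), dict(C=C_refined, gamma=gamma_refined), cv=cv) could then
# be fitted exactly as before.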
| bsd-3-clause |
datapythonista/pandas | pandas/core/dtypes/inference.py | 3 | 9194 | """ basic inference routines """
from collections import abc
from numbers import Number
import re
from typing import Pattern
import numpy as np
from pandas._libs import lib
from pandas._typing import ArrayLike
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
is_scalar = lib.is_scalar
is_decimal = lib.is_decimal
is_interval = lib.is_interval
is_list_like = lib.is_list_like
is_iterator = lib.is_iterator
def is_number(obj) -> bool:
"""
Check if the object is a number.
Returns True when the object is a number, and False if is not.
Parameters
----------
obj : any type
The object to check if is a number.
Returns
-------
is_number : bool
Whether `obj` is a number or not.
See Also
--------
api.types.is_integer: Checks a subgroup of numbers.
Examples
--------
>>> from pandas.api.types import is_number
>>> is_number(1)
True
>>> is_number(7.15)
True
Booleans are valid because they are int subclass.
>>> is_number(False)
True
>>> is_number("foo")
False
>>> is_number("5")
False
"""
return isinstance(obj, (Number, np.number))
def iterable_not_string(obj) -> bool:
"""
Check if the object is an iterable but not a string.
Parameters
----------
obj : The object to check.
Returns
-------
is_iter_not_string : bool
Whether `obj` is a non-string iterable.
Examples
--------
>>> iterable_not_string([1, 2, 3])
True
>>> iterable_not_string("foo")
False
>>> iterable_not_string(1)
False
"""
return isinstance(obj, abc.Iterable) and not isinstance(obj, str)
def is_file_like(obj) -> bool:
"""
Check if the object is a file-like object.
For objects to be considered file-like, they must
be an iterator AND have either a `read` and/or `write`
method as an attribute.
Note: file-like objects must be iterable, but
iterable objects need not be file-like.
Parameters
----------
obj : The object to check
Returns
-------
is_file_like : bool
Whether `obj` has file-like properties.
Examples
--------
>>> import io
>>> buffer = io.StringIO("data")
>>> is_file_like(buffer)
True
>>> is_file_like([1, 2, 3])
False
"""
if not (hasattr(obj, "read") or hasattr(obj, "write")):
return False
return bool(hasattr(obj, "__iter__"))
def is_re(obj) -> bool:
"""
Check if the object is a regex pattern instance.
Parameters
----------
obj : The object to check
Returns
-------
is_regex : bool
Whether `obj` is a regex pattern.
Examples
--------
>>> is_re(re.compile(".*"))
True
>>> is_re("foo")
False
"""
return isinstance(obj, Pattern)
def is_re_compilable(obj) -> bool:
"""
Check if the object can be compiled into a regex pattern instance.
Parameters
----------
obj : The object to check
Returns
-------
is_regex_compilable : bool
Whether `obj` can be compiled as a regex pattern.
Examples
--------
>>> is_re_compilable(".*")
True
>>> is_re_compilable(1)
False
"""
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_array_like(obj) -> bool:
"""
Check if the object is array-like.
For an object to be considered array-like, it must be list-like and
have a `dtype` attribute.
Parameters
----------
obj : The object to check
Returns
-------
is_array_like : bool
Whether `obj` has array-like properties.
Examples
--------
>>> is_array_like(np.array([1, 2, 3]))
True
>>> is_array_like(pd.Series(["a", "b"]))
True
>>> is_array_like(pd.Index(["2016-01-01"]))
True
>>> is_array_like([1, 2, 3])
False
>>> is_array_like(("a", "b"))
False
"""
return is_list_like(obj) and hasattr(obj, "dtype")
def is_nested_list_like(obj) -> bool:
"""
Check if the object is list-like, and that all of its elements
are also list-like.
Parameters
----------
obj : The object to check
Returns
-------
is_list_like : bool
Whether `obj` has list-like properties.
Examples
--------
>>> is_nested_list_like([[1, 2, 3]])
True
>>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
True
>>> is_nested_list_like(["foo"])
False
>>> is_nested_list_like([])
False
>>> is_nested_list_like([[1, 2, 3], 1])
False
Notes
-----
    This won't reliably detect whether a consumable iterator (e.g.
a generator) is a nested-list-like without consuming the iterator.
To avoid consuming it, we always return False if the outer container
doesn't define `__len__`.
See Also
--------
is_list_like
"""
return (
is_list_like(obj)
and hasattr(obj, "__len__")
and len(obj) > 0
and all(is_list_like(item) for item in obj)
)
def is_dict_like(obj) -> bool:
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
"""
dict_like_attrs = ("__getitem__", "keys", "__contains__")
return (
all(hasattr(obj, attr) for attr in dict_like_attrs)
# [GH 25196] exclude classes
and not isinstance(obj, type)
)
def is_named_tuple(obj) -> bool:
"""
Check if the object is a named tuple.
Parameters
----------
obj : The object to check
Returns
-------
is_named_tuple : bool
Whether `obj` is a named tuple.
Examples
--------
>>> from collections import namedtuple
>>> Point = namedtuple("Point", ["x", "y"])
>>> p = Point(1, 2)
>>>
>>> is_named_tuple(p)
True
>>> is_named_tuple((1, 2))
False
"""
return isinstance(obj, tuple) and hasattr(obj, "_fields")
def is_hashable(obj) -> bool:
"""
Return True if hash(obj) will succeed, False otherwise.
Some types will pass a test against collections.abc.Hashable but fail when
they are actually hashed with hash().
Distinguish between these and other types by trying the call to hash() and
seeing if they raise TypeError.
Returns
-------
bool
Examples
--------
>>> import collections
>>> a = ([],)
>>> isinstance(a, collections.abc.Hashable)
True
>>> is_hashable(a)
False
"""
# Unfortunately, we can't use isinstance(obj, collections.abc.Hashable),
# which can be faster than calling hash. That is because numpy scalars
# fail this test.
# Reconsider this decision once this numpy bug is fixed:
# https://github.com/numpy/numpy/issues/5562
try:
hash(obj)
except TypeError:
return False
else:
return True
def is_sequence(obj) -> bool:
"""
Check if the object is a sequence of objects.
String types are not included as sequences here.
Parameters
----------
obj : The object to check
Returns
-------
is_sequence : bool
Whether `obj` is a sequence of objects.
Examples
--------
>>> l = [1, 2, 3]
>>>
>>> is_sequence(l)
True
>>> is_sequence(iter(l))
False
"""
try:
iter(obj) # Can iterate over it.
len(obj) # Has a length associated with it.
return not isinstance(obj, (str, bytes))
except (TypeError, AttributeError):
return False
def is_dataclass(item):
"""
Checks if the object is a data-class instance
Parameters
----------
item : object
Returns
    -------
is_dataclass : bool
True if the item is an instance of a data-class,
will return false if you pass the data class itself
Examples
--------
>>> from dataclasses import dataclass
>>> @dataclass
... class Point:
... x: int
... y: int
>>> is_dataclass(Point)
False
>>> is_dataclass(Point(0,2))
True
"""
try:
from dataclasses import is_dataclass
return is_dataclass(item) and not isinstance(item, type)
except ImportError:
return False
def is_inferred_bool_dtype(arr: ArrayLike) -> bool:
"""
Check if this is a ndarray[bool] or an ndarray[object] of bool objects.
Parameters
----------
arr : np.ndarray or ExtensionArray
Returns
-------
bool
Notes
-----
This does not include the special treatment is_bool_dtype uses for
Categorical.
"""
if not isinstance(arr, np.ndarray):
return False
dtype = arr.dtype
if dtype == np.dtype(bool):
return True
elif dtype == np.dtype("object"):
return lib.is_bool_array(arr.ravel("K"))
return False
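# A small illustration (an addition, not a pandas doctest) of the expected
# behaviour of is_inferred_bool_dtype on plain numpy inputs:
#
#   >>> is_inferred_bool_dtype(np.array([True, False]))
#   True
#   >>> is_inferred_bool_dtype(np.array([True, False], dtype=object))
#   True
#   >>> is_inferred_bool_dtype(np.array([1, 0]))
#   False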
| bsd-3-clause |
iABC2XYZ/abc | DM_RFGAP_6/From ~4/Test.py | 3 | 4238 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 14:29:03 2017
Author: Peiyong Jiang : [email protected]
Function:
Test
"""
from InputBeam import *
from InputLattice import *
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from EmitNG import EmitN2G3D,EmitG2N3D,EmitN2G
from PartGen import PartGen
from LayerTwiss import LayerTwiss
from LayerMap import LayerMap
from ActionFunction import MyAct, Action_Min_Max
from Constants import pi
from RFGap import LengthCellM
from Twiss import Emit3D
plt.close('all')
wAlphaT=tf.Variable(tf.random_uniform(shape=[3],minval=-1.,maxval=1.))
wGammaT=tf.Variable(tf.random_uniform(shape=[3],minval=0.1,maxval=4.))
wETLMV=tf.Variable(tf.random_uniform(shape=[numCav],minval=0.001,maxval=0.3))
wPhis=tf.Variable(tf.random_uniform(shape=[numCav],minval=-np.pi/2.,maxval=np.pi/2))
##############################################################################
gEmitTInput=EmitN2G3D(nEmitTInput,energyInMeV)
x,xp,y,yp,z,zp=PartGen(numPart)
x,xp,y,yp,phi,Ek=LayerTwiss(x,xp,y,yp,z,zp,gEmitTInput,wAlphaT,wGammaT)
lenCellM=LengthCellM(wETLMV,wPhis)
for iCav in range(numCav):
ETLMV=wETLMV[iCav]
lenM=lenCellM[iCav]
if iCav<numCav-1:
x,xp,y,yp,phi,Ek=LayerMap(x,xp,y,yp,phi,Ek,ETLMV,lenM)
else:
len2M=lenCellM[iCav+1]
x,xp,y,yp,phi,Ek=LayerMap(x,xp,y,yp,phi,Ek,ETLMV,lenM,LastCellLen=len2M)
emitX,emitY,emitZ=Emit3D(x,xp,y,yp,phi,Ek)
lossEmit=emitX # should use n times other than emittance
trainEmit=tf.train.GradientDescentOptimizer(0.01).minimize(lossEmit)
init=tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for _ in range(1000):
sess.run(trainEmit)
xRun=sess.run(x)
xpRun=sess.run(xp)
yRun=sess.run(y)
ypRun=sess.run(yp)
phiRun=sess.run(phi)
EkRun=sess.run(Ek)
phisRun=sess.run(wPhis)
ETLRun=sess.run(wETLMV)
lossEmitRun=sess.run(lossEmit)
print('____________________')
print(phisRun)
print(ETLRun)
print(lossEmitRun)
plt.figure(1)
plt.clf()
plt.subplot(221)
plt.plot(xRun,xpRun,'.')
plt.subplot(222)
plt.plot(yRun,ypRun,'.')
plt.subplot(223)
plt.plot(phiRun,EkRun,'.')
plt.subplot(224)
plt.hold
plt.plot(phisRun,'b.')
plt.plot(ETLRun,'r.')
plt.show()
plt.pause(0.1)
print('OK')
"""
emitTNProd_Input=tf.reduce_prod(emitTN_Input)
emitTG_Input=EmitN2G3D(emitTN_Input,energyInMeV)
x0,xp0,y0,xp0,phi0,Ek0= PartGen6D(emitTG_Input,wAlphaT,wBetaT,numPart,energyInMeV,freqMHz)
x1,xp1,y1,xp1,phi1,Ek1=APF(wETLMV,wLenCellM,x0,xp0,y0,xp0,phi0,Ek0)
#___________________________________________________________________________
xMaxEmit,yMaxEmit,zMaxEmit=PartMaxPow6D(x1,xp1,y1,xp1,phi1,Ek1,coePow=0.01)
maxX,maxXP,maxY,maxYP,maxPhi,maxEnergy=PartMax6D(x1,xp1,y1,xp1,phi1,Ek1)
x,xp,y,xp,phi,Ek=PartLimit6D(x1,xp1,y1,xp1,phi1,Ek1)
emitTG_OutputTmp=Emit3DLimit(x,xp,y,xp,phi,Ek,energyOutMeV,freqMHz)
emitTG_Output=emitTG_OutputTmp*[xMaxEmit,yMaxEmit,zMaxEmit]
#_____________________________________________________________________________
emitTN_Output=EmitG2N3D(emitTG_Output,energyOutMeV)
emitTNProd_Output=tf.reduce_prod(tf.pow(emitTN_Output,0.01))
EmitGrowth=tf.div(emitTNProd_Output,emitTNProd_Input)
#_____________________________________________
lossEmit=EmitGrowth
optimizerEmit=tf.train.AdamOptimizer(0.001)
trainEmit=optimizerEmit.minimize(lossEmit)
#____________________________________________
#oTest=tf.concat([x,xp,y,xp,phi,Ek],0)
oTest=[maxX,maxXP,maxY,maxYP,maxPhi,maxEnergy]
#_______________________________________________
"""
'''
init=tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
xRun=sess.run(x1)
xpRun=sess.run(xp1)
print('____________________')
print(sess.run(nEmitTInput))
print(sess.run(gEmitTInput))
print('++++++++++++')
print(xRun)
plt.figure(1)
plt.clf()
plt.plot(xRun,'.')
print('OK')
'''
| gpl-3.0 |
glennq/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 31 | 13747 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The compressed dataset size is around 14 Mb compressed. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
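# A minimal usage sketch (an addition, not part of this module): the typical
# entry point is fetch_20newsgroups, optionally restricted to a few categories
# and with the metadata blocks stripped, e.g.
#
#   from sklearn.datasets import fetch_20newsgroups
#   cats = ['alt.atheism', 'sci.space']
#   train = fetch_20newsgroups(subset='train', categories=cats,
#                              remove=('headers', 'footers', 'quotes'))
#   print(len(train.data), train.target_names)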
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from .base import _pkl_filepath
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
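# A small illustration (an addition) of how the three helpers above compose on a
# made-up post; the address and text are hypothetical:
#
#   post = ("From: [email protected]\nSubject: hi\n\n"
#           ">quoted line\nactual body\n\n--\nsig")
#   body = strip_newsgroup_header(post)    # drops everything before the blank line
#   body = strip_newsgroup_quoting(body)   # drops the '>' line
#   body = strip_newsgroup_footer(body)    # drops the trailing signature block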
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset : 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home : optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories : None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle : bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove : tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info("Downloading 20news dataset. "
"This may take a few minutes.")
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
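# Minimal usage sketch (illustrative only; the two category names below are
# examples from the standard 20 newsgroups label set, everything else follows
# the signature documented above):
#
# >>> train = fetch_20newsgroups(subset='train',
# ...                            categories=['sci.space', 'rec.autos'],
# ...                            remove=('headers', 'footers', 'quotes'))
# >>> len(train.data) == train.target.shape[0]
# True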
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset : 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home : optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove : tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
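# Usage sketch (illustrative, based only on the signature and the Bunch fields
# documented above):
#
# >>> bunch = fetch_20newsgroups_vectorized(subset='all',
# ...                                       remove=('headers', 'footers'))
# >>> bunch.data.shape[0] == bunch.target.shape[0]   # one row per post
# True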
| bsd-3-clause |
idlead/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too highly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance matrix, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
moonbury/notebooks | github/MasteringMatplotlib/radar.py | 3 | 4341 | """
Example of creating a radar chart (a.k.a. a spider or star chart) [1]_. The
source code in this module was adapted from the radar chart example in the
matplotlib gallery [2]_.
Although this example allows a frame of either 'circle' or 'polygon', polygon
frames don't have proper gridlines (the lines are circles instead of polygons).
It's possible to get a polygon grid by setting GRIDLINE_INTERPOLATION_STEPS in
matplotlib.axis to the desired number of vertices, but the orientation of the
polygon is not aligned with the radial axes.
.. [1] http://en.wikipedia.org/wiki/Radar_chart
.. [2] http://matplotlib.org/examples/api/radar_chart.html
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
class RadarAxes(PolarAxes):
name = 'radar'
def __init__(self, figure=None, rect=None, spoke_count=0,
radar_patch_type="polygon", radar_spine_type="circle",
*args, **kwargs):
resolution = kwargs.pop("resolution", 1)
self.spoke_count = spoke_count
self.radar_patch_type = radar_patch_type
# spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.
self.radar_spine_type = radar_spine_type
if figure is None:
figure = plt.gcf()
if rect is None:
rect = figure.bbox_inches
# calculate evenly-spaced axis angles
self.radar_theta = (
2 * np.pi *
np.linspace(0, 1 - 1.0 / self.spoke_count, self.spoke_count))
# rotate theta such that the first axis is at the top
self.radar_theta += np.pi / 2
super(RadarAxes, self).__init__(figure, rect, resolution=resolution,
*args, **kwargs)
def draw_patch(self):
if self.radar_patch_type == "polygon":
return self.draw_poly_patch()
elif self.radar_patch_type == "circle":
return draw_circle_patch()
def draw_poly_patch(self):
verts = unit_poly_verts(self.radar_theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def fill(self, *args, **kwargs):
"""Override fill so that line is closed by default"""
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
"""Override plot so that line is closed by default"""
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
# FIXME: markers at x[0], y[0] get doubled-up
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(self.radar_theta * 180 / np.pi, labels)
def _gen_axes_patch(self):
return self.draw_patch()
def _gen_axes_spines(self):
if self.radar_patch_type == 'circle':
return PolarAxes._gen_axes_spines(self)
# The following is a hack to get the spines (i.e. the axes frame)
# to draw correctly for a polygon frame.
spine_type = 'circle'
verts = unit_poly_verts(self.radar_theta)
# close off polygon by repeating first vertex
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, self.radar_spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
def _as_mpl_axes(self):
return RadarAxes, {"spoke_count": self.spoke_count,
"radar_patch_type": self.radar_patch_type,
"radar_spine_type": self.radar_spine_type}
def draw_circle_patch(self):
# unit circle centered on (0.5, 0.5)
return plt.Circle((0.5, 0.5), 0.5)
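# Hypothetical usage sketch (assumes register_projection(RadarAxes) is called,
# which this module imports the machinery for; the argument names follow the
# constructor above):
#
# >>> register_projection(RadarAxes)
# >>> ax = plt.subplot(projection='radar', spoke_count=5)
# >>> ax.set_varlabels(['a', 'b', 'c', 'd', 'e'])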
def unit_poly_verts(theta):
"""Return vertices of polygon for subplot axes.
This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
"""
x0, y0, r = [0.5] * 3
verts = [(r * np.cos(t) + x0, r * np.sin(t) + y0) for t in theta]
return verts | gpl-3.0 |
mlyundin/Machine-Learning | ex7/ex7.py | 1 | 2706 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.io as sio
import itertools
def find_closest_centroids_slow(X, centroids):
return np.array([np.argmin([np.sum((x-c)**2) for c in centroids]) for x in X])
def find_closest_centroids(X, centroids):
diff = X[np.newaxis, :, :] - centroids[:, np.newaxis, :]
return np.argmin(np.sum(diff**2, axis=-1), axis=0)
def compute_centroids_slow(X, idx, K):
return np.array([np.mean(X[idx.ravel() == i, :], axis=0) for i in range(K)])
def compute_centroids(X, idx, K):
idx = np.arange(K).reshape(-1, 1) == idx.reshape(1, -1)
return idx.dot(X)/(np.sum(idx, axis=1).reshape(-1, 1))
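# Note on the vectorised compute_centroids above (illustrative, tiny example):
# `np.arange(K).reshape(-1, 1) == idx.reshape(1, -1)` builds a K x m boolean
# one-hot membership matrix M, so M.dot(X) sums the points of each cluster and
# dividing by the row sums of M yields the per-cluster means, e.g.
#
#   idx = [0, 1, 0], K = 2  ->  M = [[True, False, True],
#                                    [False, True, False]]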
def run_kmeans(X, initial_centroids, max_iters, plot_progress=False):
previous_centroids = initial_centroids
K = len(previous_centroids)
for _ in xrange(max_iters):
idx = find_closest_centroids(X, previous_centroids)
centroids = compute_centroids(X, idx, K)
if (centroids == previous_centroids).all():
break
if plot_progress:
for k in xrange(K):
x1 = [previous_centroids[k, 0], centroids[k, 0]]
x2 = [previous_centroids[k, 1], centroids[k, 1]]
plt.plot(x1, x2, 'k')
plt.plot(x1, x2, 'kx')
previous_centroids = centroids
if plot_progress:
for k, color in zip(xrange(K), itertools.cycle(['r', 'b', 'g', 'k', 'm'])):
x1, x2 = X.T
plt.plot(x1[idx == k], x2[idx == k], color+'o')
plt.show()
return idx, centroids
def kmeans_init_centroids(X, K):
temp = range(len(X))
np.random.shuffle(temp)
return X[temp[:K], :]
if __name__ == '__main__':
data = sio.loadmat('ex7data2.mat')
X = data['X']
K = 3
initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])
idx = find_closest_centroids(X, initial_centroids)
print('Closest centroids for the first 3 examples: ')
print(idx[:3])
print('(the closest centroids should be 0, 2, 1 respectively)')
idx, centroids = run_kmeans(X, initial_centroids, 400, True)
img=mpimg.imread('bird_small.png')
imgplot = plt.imshow(img)
plt.show()
img_size = img.shape
X = img.reshape(img_size[0]*img_size[1], img_size[2])
K = 16
max_iters = 10
initial_centroids = kmeans_init_centroids(X, K)
idx, centroids = run_kmeans(X, initial_centroids, max_iters)
X_recovered = centroids[idx, :]
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.imshow(img)
ax2.imshow(X_recovered.reshape(img_size))
plt.show() | mit |
YinongLong/scikit-learn | sklearn/tests/test_common.py | 3 | 9901 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import re
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.decomposition import ProjectedGradientNMF
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
_set_test_name)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield (_set_test_name(check_parameters_default_constructible, name),
name, Estimator)
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield _set_test_name(check, name), name, Estimator
else:
yield _set_test_name(check, name), name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if ('class_weight' in clazz().get_params().keys() and
issubclass(clazz, LinearClassifierMixin))]
for name, Classifier in linear_classifiers:
yield _set_test_name(check_class_weight_balanced_linear_classifier,
name), name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_all_tests_are_importable():
# Ensure that for each contentful subpackage, there is a test directory
# within it that is also a subpackage (i.e. a directory with __init__.py)
HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
\.externals(\.|$)|
\.tests(\.|$)|
\._
''')
lookup = dict((name, ispkg)
for _, name, ispkg
in pkgutil.walk_packages(sklearn.__path__,
prefix='sklearn.'))
missing_tests = [name for name, ispkg in lookup.items()
if ispkg
and not HAS_TESTS_EXCEPTIONS.search(name)
and name + '.tests' not in lookup]
assert_equal(missing_tests, [],
'{0} do not have `tests` subpackages. Perhaps they require '
'__init__.py or an add_subpackage directive in the parent '
'setup.py'.format(missing_tests))
def test_non_transformer_estimators_n_iter():
# Test that all non-transformer estimators which have a max_iter
# attribute return an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (_set_test_name(
check_non_transformer_estimators_n_iter, name),
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
estimator = Estimator()
else:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
if isinstance(estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield _set_test_name(
check_transformer_n_iter, name), name, estimator
else:
yield _set_test_name(
check_transformer_n_iter, name), name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False,
include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
# If class is deprecated, ignore deprecated warnings
if hasattr(Estimator.__init__, "deprecated_original"):
with ignore_warnings():
yield _set_test_name(
check_get_params_invariance, name), name, Estimator
else:
yield _set_test_name(
check_get_params_invariance, name), name, Estimator
| bsd-3-clause |
dsm054/pandas | pandas/tests/indexes/datetimes/test_construction.py | 1 | 29322 | from datetime import timedelta
from functools import partial
from operator import attrgetter
import numpy as np
import pytest
import pytz
from pandas._libs.tslib import OutOfBoundsDatetime
from pandas._libs.tslibs import conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
import pandas.util.testing as tm
class TestDatetimeIndex(object):
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = i.tz_localize(None).tz_localize('UTC').tz_convert(tz)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# different tz coerces tz-naive to tz-aware
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# tz mismatch with tz-aware data raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with pytest.raises(TypeError, match='data is already tz-aware'):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(TypeError, match='data is already tz-aware'):
# passing tz should result in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.tseries.frequencies.BDay()).values
result = DatetimeIndex(data, freq=pd.tseries.frequencies.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
DatetimeIndex(start='1/1/2000', periods='foo', freq='D')
pytest.raises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, DatetimeIndex, start='2011-01-01',
freq='b')
pytest.raises(ValueError, DatetimeIndex, end='2011-01-01',
freq='B')
pytest.raises(ValueError, DatetimeIndex, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
tm.assert_index_equal(idx, expected)
# if we already have a tz and its not the same, then raise
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
pytest.raises(ValueError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns]'))
# this is effectively trying to convert tz's
pytest.raises(TypeError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns, CET]'))
pytest.raises(ValueError,
lambda: DatetimeIndex(
idx, tz='CET',
dtype='datetime64[ns, US/Eastern]'))
result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')
tm.assert_index_equal(idx, result)
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
assert idx.name == 'TEST'
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(['2010'], tz='UTC')
with pytest.raises(AttributeError):
dti.tz = pytz.timezone('US/Pacific')
@pytest.mark.parametrize('tz', [
None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),
Timestamp('2000', tz='America/Los_Angeles').tz])
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')
end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')
result = DatetimeIndex(freq='D', start=start, end=end, tz=tz)
expected = DatetimeIndex(['2013-01-01 06:00:00',
'2013-01-02 06:00:00'],
tz='America/Los_Angeles')
tm.assert_index_equal(result, expected)
# Especially assert that the timezone is consistent for pytz
assert pytz.timezone('America/Los_Angeles') is result.tz
@pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo'])
def test_constructor_with_non_normalized_pytz(self, tz):
# GH 18595
non_norm_tz = Timestamp('2010', tz=tz).tz
result = DatetimeIndex(['2010'], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
def test_constructor_timestamp_near_dst(self):
# GH 20854
ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),
Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]
result = DatetimeIndex(ts)
expected = DatetimeIndex([ts[0].to_pydatetime(),
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('klass', [Index, DatetimeIndex])
@pytest.mark.parametrize('box', [
np.array, partial(np.array, dtype=object), list])
@pytest.mark.parametrize('tz, dtype', [
['US/Pacific', 'datetime64[ns, US/Pacific]'],
[None, 'datetime64[ns]']])
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp('2018-01-01', tz=tz)
result = klass(box([ts.value]), dtype=dtype)
expected = klass([ts])
assert result == expected
def test_construction_int_rountrip(self, tz_naive_fixture):
# GH 12619
tz = tz_naive_fixture
result = 1293858000000000000
expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0]
assert result == expected
def test_construction_from_replaced_timestamps_with_dst(self):
# GH 18785
index = pd.date_range(pd.Timestamp(2000, 1, 1),
pd.Timestamp(2005, 1, 1),
freq='MS', tz='Australia/Melbourne')
test = pd.DataFrame({'data': range(len(index))}, index=index)
test = test.resample('Y').mean()
result = pd.DatetimeIndex([x.replace(month=6, day=1)
for x in test.index])
expected = pd.DatetimeIndex(['2000-06-01 00:00:00',
'2001-06-01 00:00:00',
'2002-06-01 00:00:00',
'2003-06-01 00:00:00',
'2004-06-01 00:00:00',
'2005-06-01 00:00:00'],
tz='Australia/Melbourne')
tm.assert_index_equal(result, expected)
class TestTimeSeries(object):
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
assert rng.freq == rng2.freq
def test_dti_constructor_years_only(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 6961
rng1 = date_range('2014', '2015', freq='M', tz=tz)
expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz)
rng2 = date_range('2014', '2015', freq='MS', tz=tz)
expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', tz=tz)
rng3 = date_range('2014', '2020', freq='A', tz=tz)
expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz)
rng4 = date_range('2014', '2020', freq='AS', tz=tz)
expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', tz=tz)
for rng, expected in [(rng1, expected1), (rng2, expected2),
(rng3, expected3), (rng4, expected4)]:
tm.assert_index_equal(rng, expected)
def test_dti_constructor_small_int(self, any_int_dtype):
# see gh-13721
exp = DatetimeIndex(['1970-01-01 00:00:00.00000000',
'1970-01-01 00:00:00.00000001',
'1970-01-01 00:00:00.00000002'])
arr = np.array([0, 10, 20], dtype=any_int_dtype)
tm.assert_index_equal(DatetimeIndex(arr), exp)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
assert rng[0].second == 1
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
assert dti.is_(dti)
assert dti.is_(dti.view())
assert not dti.is_(dti.copy())
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
assert (idx.values == conversion.ensure_datetime64ns(arr)).all()
def test_constructor_int64_nocopy(self):
# GH#1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
assert (index.asi8[50:100] == -1).all()
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
assert (index.asi8[50:100] != -1).all()
@pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B', 'BH',
'T', 'S', 'L', 'U', 'H', 'N', 'C'])
def test_from_freq_recreate_from_data(self, freq):
org = DatetimeIndex(start='2001/02/01 09:00', freq=freq, periods=1)
idx = DatetimeIndex(org, freq=freq)
tm.assert_index_equal(idx, org)
org = DatetimeIndex(start='2001/02/01 09:00', freq=freq,
tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=freq, tz='US/Pacific')
tm.assert_index_equal(idx, org)
def test_datetimeindex_constructor_misc(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
pytest.raises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'
])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
tm.assert_index_equal(idx7, idx8)
for other in [idx2, idx3, idx4, idx5, idx6]:
assert (idx1.values == other.values).all()
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
assert len(idx) == 20
assert idx[0] == sdate + 0 * offsets.BDay()
assert idx.freq == 'B'
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
assert len(idx) == 20
assert idx[-1] == edate
assert idx.freq == '5D'
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=offsets.Week(weekday=6))
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=offsets.QuarterBegin(startingMonth=1))
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=offsets.BQuarterEnd(startingMonth=12))
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
| bsd-3-clause |
matus-chochlik/various | atmost/presentation/tools/plot-link-actu-diff.py | 1 | 2492 | #!/usr/bin/python3 -B
# coding=utf8
# ------------------------------------------------------------------------------
import os
import sys
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as pltckr
import numpy as np
from common import DictObject, PresArgParser
# ------------------------------------------------------------------------------
class ArgParser(PresArgParser):
# --------------------------------------------------------------------------
def __init__(self, **kw):
PresArgParser.__init__(self, **kw)
self._add_multi_input_arg()
# ------------------------------------------------------------------------------
def make_argparser():
return ArgParser(prog=os.path.basename(__file__))
# ------------------------------------------------------------------------------
def do_plot(options):
data = []
for p in options.input_path:
stats = DictObject.loadJson(p)
for run in stats:
for tgt in run.targets:
try:
data.append((tgt.linked.predicted, tgt.linked.actual))
except AttributeError:
pass
fig, spl = plt.subplots()
options.initialize(plt, fig)
spl.set_xlabel("Actual memory usage [GB]", fontsize=18)
spl.set_ylabel("(Actual - Predicted) memory usage [GB]", fontsize=18)
x = [a for p, a in data]
y = [a-p for p, a in data]
maxx = max(x)
minx = min(x)
maxy = max(y)
miny = min(y)
pn = 1.0/maxy
nn = -1.0/min(y)
spl.broken_barh(
xranges=[(minx, maxx-minx)],
yrange=(0.0, maxy),
color="red",
alpha=0.1
)
spl.broken_barh(
xranges=[(minx, maxx-minx)],
yrange=(miny, -miny),
color="blue",
alpha=0.1
)
mix = lambda a, b, f: (1.0-f)*a + f*b
spl.scatter(
x, y,
color = [(
mix(0.6, 1.0, max(v, 0.0)*pn),
mix(0.6, 0.0, max(max(v, 0.0)*pn, -min(0.0, v)*nn)),
mix(0.6, 1.0, -min(0.0, v)*nn),
) for v in y]
)
spl.grid(which="both", axis="both")
options.finalize(plt)
# ------------------------------------------------------------------------------
def main():
do_plot(make_argparser().make_options())
return 0
# ------------------------------------------------------------------------------
if __name__ == "__main__":
exit(main())
# ------------------------------------------------------------------------------
| mit |
prasadtalasila/IRCLogParser | lib/vis.py | 1 | 16090 | import matplotlib as mpl
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
import numpy as np
from scipy.optimize import curve_fit
from sklearn.metrics import mean_squared_error
import config
import util
import igraph
from random import randint
import math
import os
import lib.in_out.saver as saver
from numpy.random import normal
from scipy import stats
import plotly.plotly as py
import plotly.graph_objs as go
from numpy import genfromtxt
import glob
py.sign_in(config.USER_NAME, config.TOKEN)
def plot_data (data, output_directory, output_file_name):
x_data, y_data = (d for d in data)
x = np.array(x_data)
y = np.array(y_data)
plt.figure()
plt.plot(x, y, 'b-', label="Data")
plt.legend()
# plt.show()
saver.check_if_dir_exists(output_directory)
plt.savefig(output_directory + "/" + output_file_name + ".png")
plt.close()
def generate_probability_distribution(data):
"""
Normalises the y coordinates by dividing each entry by the sum of all y entries
Args:
data(list of list): list-of-list representation of csv data (with 2 columns)
Returns:
x-coordinates (list)
freq (list): normalised y-coordinates
"""
if data:
topRows = [int(x[1]) for x in data]
total = sum(topRows)
freq = [x/float(total) for x in topRows]
return range(0, len(data)), freq
else:
print "ERROR generate_probability_distribution"
return -1, -1
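# Illustrative example (hypothetical csv rows of the form [label, count]):
#
# >>> generate_probability_distribution([['a', '2'], ['b', '3'], ['c', '5']])
# ([0, 1, 2], [0.2, 0.3, 0.5])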
# FOR CL and RT analysis
def exponential_curve_fit_and_plot(data, output_directory, output_file_name):
"""
Fit the data to an exponential curve and plot both the data and the fitted curve
Args:
data(list of list): list-of-list representation of csv data (with 2 columns)
output_directory(str): location to save the graph
output_file_name(str): name of the image file to be saved
Returns:
a (int) : curve fit variable for the equation a * np.exp(-b * x) + c
b (int) : curve fit variable for the equation a * np.exp(-b * x) + c
c (int) : curve fit variable for the equation a * np.exp(-b * x) + c
mse (int) : Mean Squared error from the fit
"""
x_pdf, y_pdf = generate_probability_distribution(data)
if y_pdf != -1:
x = np.array(x_pdf)
y = np.array(y_pdf)
popt, pcov = curve_fit(util.exponential_curve_func, x, y)
[a, b, c] = popt
mse = mean_squared_error(util.exponential_curve_func(x, *popt), y)
if config.DEBUGGER:
print "CURVE FIT", output_file_name, "|", a, b, c, "MSE =", mse
plt.figure()
plt.plot(x, y, 'b-', label="Data")
plt.plot(x, util.exponential_curve_func(x, *popt), 'r-', label="Fitted Curve")
axes = plt.gca()
axes.set_xlim([0, 20])
axes.set_ylim([0, 1])
plt.legend()
# plt.show()
saver.check_if_dir_exists(output_directory)
plt.savefig(output_directory + "/" + output_file_name + ".png")
plt.close()
return [a, b, c, mse]
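# The fits above rely on util.exponential_curve_func; based on the docstrings
# (a * np.exp(-b * x) + c) it is presumably equivalent to the following sketch:
#
# def exponential_curve_func(x, a, b, c):
#     return a * np.exp(-b * x) + c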
# Ignoring Initial Zeros in CRT
def exponential_curve_fit_and_plot_x_shifted(data, output_directory, output_file_name):
"""
Fit the data to an exponential curve and plot both the data and the fitted curve
Also ignores the input until the first non-zero y-coordinate and shifts the
x-axis labels by that offset
Args:
data(list of list): list-of-list representation of csv data (with 2 columns)
output_directory(str): location to save the graph
output_file_name(str): name of the image file to be saved
Returns:
a (int) : curve fit variable for the equation a * np.exp(-b * x) + c
b (int) : curve fit variable for the equation a * np.exp(-b * x) + c
c (int) : curve fit variable for the equation a * np.exp(-b * x) + c
first_non_zero_index (int): number of leading zero entries skipped (the x-axis shift)
mse (int) : Mean Squared error from the fit
"""
x_pdf, y_pdf = generate_probability_distribution(data)
if y_pdf != -1:
first_non_zero_index = -1
if filter(lambda x: x != 0, y_pdf):
first_non_zero_index = y_pdf.index(filter(lambda x: x != 0, y_pdf)[0])
x = np.array(x_pdf[0: -1*first_non_zero_index])
y = np.array(y_pdf[first_non_zero_index:])
popt, pcov = curve_fit(util.exponential_curve_func, x, y)
[a, b, c] = popt
mse = mean_squared_error(util.exponential_curve_func(x, *popt), y)
if config.DEBUGGER:
print "CURVE FIT", output_file_name, "|", a, b, c, "x-shift =", first_non_zero_index, "MSE =", mse
plt.figure()
plt.plot(x, y, 'b-', label="Data")
plt.plot(x, util.exponential_curve_func(x, *popt), 'r-', label="Fitted Curve")
axes = plt.gca()
# axes.set_xlim([0 ,20])
axes.set_ylim([0, 1])
plt.xticks(range(0, 20, 5), xrange(first_non_zero_index, len(x), 5), size='small')
plt.legend()
# plt.show()
saver.check_if_dir_exists(output_directory)
plt.savefig(output_directory + "/" + output_file_name + ".png")
plt.close()
return [a, b, c, mse, first_non_zero_index]
def plot_infomap_igraph(i_graph, membership, output_directory, output_file_name, show_edges=True, aux_data=None):
"""
Plots the informap community generated by igraph
Args:
i_graph(object): igraph graph object
membership(list): membership generated by infomap.community_infomap
output_drectory(str): location to save graph
output_file_name(str): name of the image file to be saved
show_edges(bool): toggle to disable/enable edges during viz
Returns:
null
"""
if membership is not None:
graph_copy = i_graph.copy()
edges = []
edges_colors = []
for edge in i_graph.es():
if membership[edge.tuple[0]] != membership[edge.tuple[1]]:
edges.append(edge)
edges_colors.append("#55555520")
# edges_colors.append("#00000000")
else:
edges_colors.append("#00000099")
graph_copy.delete_edges(edges)
layout = graph_copy.layout("kk")
i_graph.es["color"] = edges_colors
else:
layout = i_graph.layout("kk")
i_graph.es["color"] = "gray"
visual_style = {}
visual_style["vertex_label_dist"] = 0
visual_style["vertex_label_size"] = 18
if show_edges:
visual_style["edge_color"] = i_graph.es["color"]
else:
visual_style["edge_color"] = "#00000000"
visual_style["vertex_size"] = 32
visual_style["layout"] = layout
visual_style["bbox"] = (1024, 768)
visual_style["margin"] = 40
#visual_style["edge_label"] = i_graph.es["weight"]
if i_graph.es:
visual_style["edge_width"] = igraph.rescale(i_graph.es['weight'], out_range=(1, 10))
for vertex in i_graph.vs():
vertex["label"] = vertex.index
if membership is not None:
colors = []
for i in range(0, max(membership)+1):
colors.append('%06X' % randint(0, 0xFFFFFF))
for vertex in i_graph.vs():
vertex["vertex_shape"] = "circle"
vertex["color"] = str('#') + colors[membership[vertex.index]]
# coloring for channels vs users
vertex["vertex_shape"] = "square" if (vertex["id"].isdigit() and int(vertex["id"]) >= 1000000) else "circle"
# vertex["color"] = "red" if (vertex["id"].isdigit() and int(vertex["id"]) >= 1000000) else "#00ff00"
if aux_data:
if aux_data["type"] == "MULTI_CH":
# ['#kubuntu-devel', '#kubuntu', '#ubuntu-devel']
color_nodes = ["#ff0000", "#00ff00", "#0000ff"]
vertex["color"] = color_nodes[np.argmax(aux_data["uc_adj"][aux_data["user_hash"].index(vertex["id"])])]
if not aux_data:
visual_style["vertex_color"] = i_graph.vs["color"]
visual_style["vertex_shape"] = i_graph.vs["vertex_shape"]
saver.check_if_dir_exists(output_directory)
igraph.plot(i_graph, (output_directory + "/" + output_file_name + ".png"), **visual_style)
if config.DEBUGGER:
print "INFOMAPS visualisation for", output_file_name, "completed"
def generate_log_plots(plot_data, output_directory, output_file_name):
"""
Generate log plots for given time frame
Args:
plot_data (list of list): data to be plotted
output_directory(str): location to save the graph
output_file_name(str): name of the image file to be saved
Returns:
slope : The slope of the linear fit for the log-log plot.
intercept : The intercept of the linear fit.
r_square : The R-squared value of the linear fit.
mean_squared_error : Mean squared error of the best fit.
"""
sum_each_row = []
for row in plot_data[2:]: #ignore degree 0 and text, starting from degree 1
sum_each_row.append(row)
x_axis_log = [math.log(i) for i in xrange(1, len(sum_each_row) + 1)]
y_axis_log = [math.log(i) if i>0 else 0 for i in sum_each_row[0:] ]
slope,intercept,r_square,mean_squared_error = calc_plot_linear_fit(x_axis_log, y_axis_log, output_directory, output_file_name)
return slope,intercept,r_square,mean_squared_error
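# Interpretation note (added for clarity): a straight-line fit in log-log
# space, log(y) = slope * log(x) + intercept, corresponds to the power law
# y = exp(intercept) * x ** slope, so the slope returned by generate_log_plots
# is the exponent of the fitted degree distribution.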
def calc_plot_linear_fit(x_in, y_in, output_directory, output_file_name):
"""
Calculate and plot a linear fit for the data
Args:
x_in (list of int): x_axis data
y_in (list of int): y_axis data
output_directory(str): location to save the graph
output_file_name(str): name of the image file to be saved
Returns:
slope, intercept, r_square, mean_squared_error of the fit
(-1, -1, -1, -1 if the input is empty)
"""
# get x and y vectors
if x_in and y_in:
x = np.array(x_in)
y = np.array(y_in)
slope, intercept, r_value, p_value, std_err = stats.linregress(x_in, y_in)
line = [slope*xi+intercept for xi in x_in]
print str(slope)+"\t"+str(intercept)+"\t"+str(r_value**2)+"\t"+str(mean_squared_error(y, line))
saver.check_if_dir_exists(output_directory)
if config.USE_PYPLOT:
def trace_helper_pyplot(x, y, label, color):
return go.Scatter(
x=x,
y=y,
mode='lines',
marker=go.Marker(color=color),
name=label
)
trace1 = trace_helper_pyplot(x, y, 'Data', 'rgb(255, 127, 14)')
trace2 = trace_helper_pyplot(x, line, 'Fit', 'rgb(31, 119, 180)')
layout = go.Layout(
title='DegreeNode',
xaxis=go.XAxis(zerolinecolor='rgb(255,255,255)', gridcolor='rgb(255,255,255)'),
# yaxis=go.YAxis(zerolinecolor='rgb(255,255,255)', gridcolor='rgb(255,255,255)')
)
data = [trace1, trace2]
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, output_directory+"/"+output_file_name + ".png")
else:
# graph config
axes = plt.gca()
axes.set_xlim([0, 3])
axes.set_ylim([0, 6])
plt.xlabel("log(degree)")
plt.ylabel("log(no_of_nodes)")
# fit with np.polyfit
m, b = np.polyfit(x, y, 1)
plt.plot(x, y, '-')
plt.plot(x, m*x + b, '-')
plt.legend(['Data', 'Fit'], loc='upper right')
plt.savefig(output_directory+"/" + output_file_name+".png")
plt.close()
return slope,intercept,r_value**2,mean_squared_error(y, line)
else:
print "ERROR calc_plot_linear_fit"
return -1, -1, -1, -1
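# Illustrative sketch (not part of the original module): the same log-log linear
# fit can be reproduced with scipy.stats.linregress alone.  The degree counts
# below are synthetic numbers made up for demonstration.
def _example_loglog_fit():
    import numpy as _np
    from scipy import stats as _stats
    degrees = _np.arange(1, 20)
    counts = 1000.0 / degrees ** 2  # roughly power-law shaped data
    x_log = _np.log(degrees)
    y_log = _np.log(counts)
    slope, intercept, r_value, p_value, std_err = _stats.linregress(x_log, y_log)
    return slope, intercept, r_value ** 2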
def generate_group_bar_charts(y_values, x_values, trace_header, output_directory, output_file_name):
"""
Plots multiple bar graphs on same graph
example usage:
generate_group_bar_charts([
[5.10114882, 5.0194652482, 4.9908093076],
[4.5824497358, 4.7083614037, 4.3812775722],
[2.6839471308, 3.0441476209, 3.6403820447]
], ['#kubuntu-devel', '#ubuntu-devel', '#kubuntu'],
['head1', 'head2', 'head3'], '/home/rohan/Desktop/', 'multi_box'
)
Args:
        y_values (list of list): y_axis data, one list per trace
        x_values (list): x_axis labels
        trace_header (list of str): legend name for each trace
        output_directory(str): location to save graph
        output_file_name(str): name of the image file to be saved
Returns:
null
"""
data = [
go.Bar(
x=x_values,
y=y_values[i],
name=trace_header[i]
) for i in range(len(y_values))
]
layout = go.Layout(
barmode='group'
)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, output_directory + "/" + output_file_name+".png")
def csv_heatmap_generator_plotly(in_directory, output_directory, output_file_name):
"""
Plots heatmaps for all the csv files in the given directory
Args:
in_directory (str): location of input csv files
        output_directory(str): location to save graph
output_file_name(str): name of the image file to be saved
Returns:
null
"""
file_list = glob.glob(in_directory+"*.csv")
for file in file_list:
csv_data = genfromtxt(file, delimiter=',')
trace = go.Heatmap(
z=csv_data,
x=list(range(48)),
y=list(range(1, 12)),
colorscale=[
[0, 'rgb(255, 255, 204)'],
[0.13, 'rgb(255, 237, 160)'],
[0.25, 'rgb(254, 217, 118)'],
[0.38, 'rgb(254, 178, 76)'],
[0.5, 'rgb(253, 141, 60)'],
[0.63, 'rgb(252, 78, 42)'],
[0.75, 'rgb(227, 26, 28)'],
[0.88, 'rgb(189, 0, 38)'],
[1.0, 'rgb(128, 0, 38)']
]
)
data = [trace]
layout = go.Layout(title='HeatMap', width=800, height=640)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, filename=in_directory+file[file.rfind("/")+1:-4]+'_heatmap.png')
def matplotlob_csv_heatmap_generator(csv_file, output_directory, output_file_name):
"""
    Plots a heatmap for the given csv file
    Can be used as a script for generating heatmaps, faster alternative to plotly
    Args:
        csv_file (str): path to the input csv file
        output_directory(str): location to save graph
        output_file_name(str): name of the image file to be saved
Returns:
null
"""
column_labels = map(str, range(1, 32))
row_labels = map(str, range(1, 49))
data = genfromtxt(csv_file, delimiter=',')
print(data)
fig, ax = plt.subplots(figsize=(10, 10))
heatmap = ax.pcolor(data, cmap=plt.cm.Reds)
cbar = plt.colorbar(heatmap)
def np_arrange_helper(data, disp):
return np.arange(data) + disp
# put the major ticks at the middle of each cell
ax.set_xticks(np_arrange_helper(data.shape[0], 0.5), minor=False)
ax.set_yticks(np_arrange_helper(data.shape[1], 0.5), minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(column_labels, minor=False)
plt.savefig(output_directory+"/" + output_file_name+".png")
plt.close()
def box_plot(data, output_directory, output_file_name):
"""
Plots Box Plots
Args:
data (list): data
        output_directory(str): location to save graph
output_file_name(str): name of the image file to be saved
Returns:
null
"""
plt.figure()
plt.boxplot(data)
plt.legend()
saver.check_if_dir_exists(output_directory)
plt.savefig(output_directory + "/" + output_file_name + ".png")
plt.close()
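# Minimal usage sketch (not part of the original module); the directory and file
# name below are placeholder values.
if __name__ == "__main__":
    box_plot([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]], "/tmp", "box_plot_demo")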
| gpl-3.0 |
velezj/ml-chats | time_series/code/neural_network/simple.py | 1 | 8696 | import logging
logger = logging.getLogger( __name__ )
import numpy as np
import scipy
import scipy.stats
import matplotlib.pyplot as plt
##=========================================================================
##
# A simple, fixed, 1 hidden layer network with a
# single input and single output good for a
# scalar function :-).
#
# Will *do* include an offset/bias term
class SimpleScalarF_1Layer( object ):
##
    # Build a new neural network with a single hidden layer
# with the given number of hidden nodes
def __init__( self,
learning_rate = 0.001,
hidden_layer_size = 10,
init_weight_max_magnitude = 1e-1,
output_sigma = 2.0):
self.hidden_layer_size = hidden_layer_size
self.output_sigma = output_sigma
self.learning_rate = learning_rate
# the hidden layer weights
self.V = ( np.random.random( size=(hidden_layer_size, 2 ) ) - 0.5 ) * 2 * init_weight_max_magnitude
# the output layer weights
self.w = ( np.random.random( size=self.hidden_layer_size ) - 0.5 ) * 2 * init_weight_max_magnitude
# the number of epochs seen
self.epoch = 0
##
# Trains the network using the given data.
    # We will use stochastic gradient descent with single-sample updates
def train( self, data, max_epochs = np.inf, min_epochs = 0 ):
# compute the pre-training log likelihood
pre_ll = self.ll( data )
# keep feeding in training data
counter = 0
while counter < max_epochs:
# count new epoch
self.epoch += 1
counter += 1
# iterate over each element randomly
for i in xrange(len(data)):
idx = np.random.choice(len(data))
(input,output) = data[idx]
# convert from data to input x (including clamped last)
x = np.array( [ input, 1.0] )
y = np.array( [ output ] )
# update weights for input
self.update_weights_using_gradient_learning( x, y )
# grab new log likelihood
new_ll = self.ll( data )
# stop when we are no longer better
if counter >= min_epochs and new_ll <= pre_ll:
break
return counter
##
# Produce output for inputs
def regress(self, xs):
ys = []
for x in xs:
x = np.array([x,1.0])
z = np.zeros( self.hidden_layer_size )
for h_i in xrange(self.hidden_layer_size):
z_i = np.dot( self.V[h_i,:], x )
z_i = scipy.special.expit( z_i )
z[ h_i ] = z_i
mu = np.dot( self.w, z )
y = mu
ys.append( y )
return ys
##
# The log likelihood for a data set
def ll(self, data):
res = 0.0
for (e,y) in data:
x = np.array( [e, 1.0] )
y = np.array( [y] )
res += self._single_ll( x, y )
return res
##
# The log likelihood for a y given an x
def _single_ll(self, x, y ):
# compute z
z = np.zeros( self.hidden_layer_size )
for h_i in xrange(self.hidden_layer_size):
z_i = np.dot( self.V[h_i,:], x )
z_i = scipy.special.expit( z_i )
z[ h_i ] = z_i
# Ok, merge for gaussian mean
mu = np.dot( self.w, z )
# compute pdf
return scipy.stats.norm.logpdf( y, loc=mu, scale=self.output_sigma)
##
# Update weights using gradient and learning rate
def update_weights_using_gradient_learning(self, x, y ):
        # Forward pass, compute activations and outputs for the network
a = np.zeros( shape=(self.hidden_layer_size,1) )
z = np.zeros( shape=(self.hidden_layer_size,1) )
b = np.zeros( shape=(1,1) )
for h_i in xrange(self.hidden_layer_size):
a_i = np.dot( self.V[ h_i, :] , x )
a[ h_i ] = a_i
z_i = scipy.special.expit( a_i )
z[ h_i ] = z_i
b = np.dot( self.w, z )
# ok, gradient updating using learning rate :-)
# output layer error
d2 = b - y
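        # (for a Gaussian output with fixed sigma this is the gradient of the
        #  negative log likelihood with respect to the output b, up to the
        #  1/sigma^2 factor)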
# backpropagate to get hidden layer error
d1 = np.zeros( shape=(self.hidden_layer_size,1) )
for h_i in xrange( self.hidden_layer_size ):
sig_a = scipy.special.expit( a[h_i] )
deriv_a = sig_a * (1.0 - sig_a)
d1[h_i] = d2 * self.w[h_i] * deriv_a
# Ok, we now have the gradient, so move in the opposite
        # direction multiplied by the learning rate
for h_i in xrange(self.hidden_layer_size):
self.w[h_i] += self.learning_rate * (- d2 * z[h_i])
self.V[h_i,:] += self.learning_rate * (- d1[h_i] * x)
def visualize_inputs_to_final_layer(self, data):
xs = map(lambda o: o[0], data)
wzs = []
for x in xs:
x = np.array([x,1.0])
z = np.zeros( self.hidden_layer_size )
for h_i in xrange(self.hidden_layer_size):
z_i = np.dot( self.V[h_i,:], x )
z_i = scipy.special.expit( z_i )
z[ h_i ] = z_i
wzs.append( self.w * z )
wzs = np.asarray(wzs)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
for i in range(self.hidden_layer_size):
x = xs
y = wzs[:, i]
ax.plot( x, y, label="W_{0} * z_{0}".format( i ) )
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
##=========================================================================
def plot_fits( nn,
data,
train_epochs = 100,
num_iterations = 5 ):
# grab only the xs
xs = map(lambda o: o[0], data )
ys = map(lambda o: o[1], data )
# ok, see the regression after a number of training epochs
regs = []
lls = []
regs.append( nn.regress( xs ) )
lls.append( nn.ll( data ) )
for i in xrange(num_iterations):
nn.train( data,
min_epochs = train_epochs,
max_epochs = train_epochs )
regs.append( nn.regress( xs ) )
lls.append( nn.ll( data ) )
# plot the regressions
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
for i,(reg,ll) in enumerate(zip(regs,lls)):
x = xs
y = reg
ax.plot( x, y, label="{0} ll={1}".format( i * train_epochs,
ll) )
    # plot the actual data
ax.plot( xs, ys, 'k--', lw=2, label='data')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
##=========================================================================
def generate_hill_data(num_points=100):
x = np.linspace( -10, 10, num_points )
y = np.power( x, 2.0 )
y -= np.mean(y)
return zip( x, y )
##=========================================================================
def generate_periodic_data(num_points=100, period=10):
x = np.linspace( -10, 10, num_points )
y = np.sin( x * 2 * np.pi * period / num_points )
y -= np.mean(y)
return zip( x, y )
##=========================================================================
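# Minimal usage sketch (not part of the original file): train the one-hidden-layer
# network on the synthetic "hill" data defined above.  The epoch counts and layer
# size are arbitrary choices for illustration.
if __name__ == "__main__":
    data = generate_hill_data(num_points=50)
    nn = SimpleScalarF_1Layer(learning_rate=0.001, hidden_layer_size=10)
    nn.train(data, min_epochs=20, max_epochs=20)
    print("log likelihood after training: %s" % nn.ll(data))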
| apache-2.0 |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/mpl_toolkits/axisartist/angle_helper.py | 7 | 14787 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from math import floor
import numpy as np
import math
A = np.array
from mpl_toolkits.axisartist.grid_finder import ExtremeFinderSimple
def select_step_degree(dv):
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [ 1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_factors = [1.] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = A(minsec_limits_)*(1./60.)
minute_factors = [60.] * len(minute_limits_)
second_limits_ = A(minsec_limits_)*(1./3600.)
second_factors = [3600.] * len(second_limits_)
degree_limits = np.concatenate([second_limits_,
minute_limits_,
degree_limits_])
degree_steps = np.concatenate([minsec_steps_,
minsec_steps_,
degree_steps_])
degree_factors = np.concatenate([second_factors,
minute_factors,
degree_factors])
n = degree_limits.searchsorted(dv)
step = degree_steps[n]
factor = degree_factors[n]
return step, factor
def select_step_hour(dv):
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2 , 3, 4, 6, 8, 12, 18, 24]
hour_factors = [1.] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = A(minsec_limits_)*(1./60.)
minute_factors = [60.] * len(minute_limits_)
second_limits_ = A(minsec_limits_)*(1./3600.)
second_factors = [3600.] * len(second_limits_)
hour_limits = np.concatenate([second_limits_,
minute_limits_,
hour_limits_])
hour_steps = np.concatenate([minsec_steps_,
minsec_steps_,
hour_steps_])
hour_factors = np.concatenate([second_factors,
minute_factors,
hour_factors])
n = hour_limits.searchsorted(dv)
step = hour_steps[n]
factor = hour_factors[n]
return step, factor
def select_step_sub(dv):
# subarcsec or degree
tmp = 10.**(int(math.log10(dv))-1.)
factor = 1./tmp
if 1.5*tmp >= dv:
step = 1
elif 3.*tmp >= dv:
step = 2
elif 7.*tmp >= dv:
step = 5
else:
step = 1
factor = 0.1*factor
return step, factor
def select_step(v1, v2, nv, hour=False, include_last=True,
threshold_factor=3600.):
if v1 > v2:
v1, v2 = v2, v1
dv = float(v2 - v1) / nv
if hour:
_select_step = select_step_hour
cycle = 24.
else:
_select_step = select_step_degree
cycle = 360.
# for degree
if dv > 1./threshold_factor:
#print "degree"
step, factor = _select_step(dv)
else:
step, factor = select_step_sub(dv*threshold_factor)
#print "feac", step, factor
factor = factor * threshold_factor
f1, f2, fstep = v1*factor, v2*factor, step/factor
levs = np.arange(math.floor(f1/step), math.ceil(f2/step)+0.5,
1, dtype="i") * step
# n : number of valid levels. If there is a cycle, e.g., [0, 90, 180,
# 270, 360], the grid line needs to be extended from 0 to 360, so
# we need to return the whole array. However, the last level (360)
# needs to be ignored often. In this case, so we return n=4.
n = len(levs)
# we need to check the range of values
# for example, -90 to 90, 0 to 360,
if factor == 1. and (levs[-1] >= levs[0]+cycle): # check for cycle
nv = int(cycle / step)
if include_last:
levs = levs[0] + np.arange(0, nv+1, 1) * step
else:
levs = levs[0] + np.arange(0, nv, 1) * step
n = len(levs)
return np.array(levs), n, factor
def select_step24(v1, v2, nv, include_last=True, threshold_factor=3600):
v1, v2 = v1/15., v2/15.
levs, n, factor = select_step(v1, v2, nv, hour=True,
include_last=include_last,
threshold_factor=threshold_factor)
return levs*15., n, factor
def select_step360(v1, v2, nv, include_last=True, threshold_factor=3600):
return select_step(v1, v2, nv, hour=False,
include_last=include_last,
threshold_factor=threshold_factor)
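# Illustrative sketch (not part of the original module): select_step360 returns the
# tick levels, the number of valid levels and the factor used for fractional
# degrees.  The call mirrors the self-test at the bottom of this file.
def _example_select_step360():
    levs, n, factor = select_step360(20.5 + 21.2 / 3600., 20.5 + 33.3 / 3600., 5)
    return levs, n, factor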
class LocatorBase(object):
def __init__(self, den, include_last=True):
self.den = den
self._include_last = include_last
def _get_nbins(self):
return self.den
def _set_nbins(self, v):
self.den = v
nbins = property(_get_nbins, _set_nbins)
def set_params(self, **kwargs):
if "nbins" in kwargs:
self.den = int(kwargs.pop("nbins"))
if kwargs:
raise ValueError("Following keys are not processed: %s" % \
", ".join([str(k) for k in kwargs.keys()]))
class LocatorHMS(LocatorBase):
def __call__(self, v1, v2):
return select_step24(v1, v2, self.den, self._include_last)
class LocatorHM(LocatorBase):
def __call__(self, v1, v2):
return select_step24(v1, v2, self.den, self._include_last,
threshold_factor=60)
class LocatorH(LocatorBase):
def __call__(self, v1, v2):
return select_step24(v1, v2, self.den, self._include_last,
threshold_factor=1)
class LocatorDMS(LocatorBase):
def __call__(self, v1, v2):
return select_step360(v1, v2, self.den, self._include_last)
class LocatorDM(LocatorBase):
def __call__(self, v1, v2):
return select_step360(v1, v2, self.den, self._include_last,
threshold_factor=60)
class LocatorD(LocatorBase):
def __call__(self, v1, v2):
return select_step360(v1, v2, self.den, self._include_last,
threshold_factor=1)
class FormatterDMS(object):
deg_mark = "^{\circ}"
min_mark = "^{\prime}"
sec_mark = "^{\prime\prime}"
fmt_d = "$%d"+deg_mark+"$"
fmt_ds = r"$%d.\!\!"+deg_mark+"%s$"
    # %s for sign
fmt_d_m = r"$%s%d"+deg_mark+"\,%02d"+min_mark+"$"
fmt_d_ms = r"$%s%d"+deg_mark+"\,%02d.\mkern-4mu"+min_mark+"%s$"
fmt_d_m_partial = "$%s%d"+deg_mark+"\,%02d"+min_mark+"\,"
fmt_s_partial = "%02d"+sec_mark+"$"
fmt_ss_partial = "%02d.\!\!"+sec_mark+"%s$"
def _get_number_fraction(self, factor):
## check for fractional numbers
number_fraction = None
# check for 60
for threshold in [1, 60, 3600]:
if factor <= threshold:
break
d = factor // threshold
int_log_d = int(floor(math.log10(d)))
if 10**int_log_d == d and d!=1:
number_fraction = int_log_d
factor = factor // 10**int_log_d
return factor, number_fraction
return factor, number_fraction
def __call__(self, direction, factor, values):
if len(values) == 0:
return []
#ss = [[-1, 1][v>0] for v in values] #not py24 compliant
values = np.asarray(values)
ss = np.where(values>0, 1, -1)
sign_map = {(-1, True):"-"}
signs = [sign_map.get((s, v!=0), "") for s, v in zip(ss, values)]
factor, number_fraction = self._get_number_fraction(factor)
values = np.abs(values)
if number_fraction is not None:
values, frac_part = divmod(values, 10**number_fraction)
frac_fmt = "%%0%dd" % (number_fraction,)
frac_str = [frac_fmt % (f1,) for f1 in frac_part]
if factor == 1:
if number_fraction is None:
return [self.fmt_d % (s*int(v),) for (s, v) in zip(ss, values)]
else:
return [self.fmt_ds % (s*int(v), f1) for (s, v, f1) in \
zip(ss, values, frac_str)]
elif factor == 60:
deg_part, min_part = divmod(values, 60)
if number_fraction is None:
return [self.fmt_d_m % (s1, d1, m1) \
for s1, d1, m1 in zip(signs, deg_part, min_part)]
else:
return [self.fmt_d_ms % (s, d1, m1, f1) \
for s, d1, m1, f1 in zip(signs, deg_part, min_part, frac_str)]
elif factor == 3600:
if ss[-1] == -1:
inverse_order = True
values = values[::-1]
                signs = signs[::-1]
else:
inverse_order = False
l_hm_old = ""
r = []
deg_part, min_part_ = divmod(values, 3600)
min_part, sec_part = divmod(min_part_, 60)
if number_fraction is None:
sec_str = [self.fmt_s_partial % (s1,) for s1 in sec_part]
else:
sec_str = [self.fmt_ss_partial % (s1, f1) for s1, f1 in zip(sec_part, frac_str)]
for s, d1, m1, s1 in zip(signs, deg_part, min_part, sec_str):
l_hm = self.fmt_d_m_partial % (s, d1, m1)
if l_hm != l_hm_old:
l_hm_old = l_hm
l = l_hm + s1 #l_s
else:
l = "$"+s1 #l_s
r.append(l)
if inverse_order:
return r[::-1]
else:
return r
else: # factor > 3600.
return [r"$%s^{\circ}$" % (str(v),) for v in ss*values]
class FormatterHMS(FormatterDMS):
deg_mark = "^\mathrm{h}"
min_mark = "^\mathrm{m}"
sec_mark = "^\mathrm{s}"
fmt_d = "$%d"+deg_mark+"$"
fmt_ds = r"$%d.\!\!"+deg_mark+"%s$"
    # %s for sign
fmt_d_m = r"$%s%d"+deg_mark+"\,%02d"+min_mark+"$"
fmt_d_ms = r"$%s%d"+deg_mark+"\,%02d.\!\!"+min_mark+"%s$"
fmt_d_m_partial = "$%s%d"+deg_mark+"\,%02d"+min_mark+"\,"
fmt_s_partial = "%02d"+sec_mark+"$"
fmt_ss_partial = "%02d.\!\!"+sec_mark+"%s$"
def __call__(self, direction, factor, values): # hour
return FormatterDMS.__call__(self, direction, factor, np.asarray(values)/15.)
class ExtremeFinderCycle(ExtremeFinderSimple):
"""
When there is a cycle, e.g., longitude goes from 0-360.
"""
def __init__(self,
nx, ny,
lon_cycle = 360.,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (-90, 90)
):
#self.transfrom_xy = transform_xy
#self.inv_transfrom_xy = inv_transform_xy
self.nx, self.ny = nx, ny
self.lon_cycle, self.lat_cycle = lon_cycle, lat_cycle
self.lon_minmax = lon_minmax
self.lat_minmax = lat_minmax
def __call__(self, transform_xy, x1, y1, x2, y2):
"""
get extreme values.
x1, y1, x2, y2 in image coordinates (0-based)
nx, ny : number of divisions in each axis
"""
x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
x, y = np.meshgrid(x_, y_)
lon, lat = transform_xy(np.ravel(x), np.ravel(y))
# iron out jumps, but algorithm should be improved.
        # This is just a naive way of doing it and may fail for some cases.
# Consider replacing this with numpy.unwrap
# We are ignoring invalid warnings. They are triggered when
# comparing arrays with NaNs using > We are already handling
# that correctly using np.nanmin and np.nanmax
with np.errstate(invalid='ignore'):
if self.lon_cycle is not None:
lon0 = np.nanmin(lon)
lon -= 360. * ((lon - lon0) > 180.)
if self.lat_cycle is not None:
lat0 = np.nanmin(lat)
lat -= 360. * ((lat - lat0) > 180.)
lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)
lon_min, lon_max, lat_min, lat_max = \
self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)
return lon_min, lon_max, lat_min, lat_max
def _adjust_extremes(self, lon_min, lon_max, lat_min, lat_max):
lon_min, lon_max, lat_min, lat_max = \
self._add_pad(lon_min, lon_max, lat_min, lat_max)
# check cycle
if self.lon_cycle:
lon_max = min(lon_max, lon_min + self.lon_cycle)
if self.lat_cycle:
lat_max = min(lat_max, lat_min + self.lat_cycle)
if self.lon_minmax is not None:
min0 = self.lon_minmax[0]
lon_min = max(min0, lon_min)
max0 = self.lon_minmax[1]
lon_max = min(max0, lon_max)
if self.lat_minmax is not None:
min0 = self.lat_minmax[0]
lat_min = max(min0, lat_min)
max0 = self.lat_minmax[1]
lat_max = min(max0, lat_max)
return lon_min, lon_max, lat_min, lat_max
if __name__ == "__main__":
#test2()
#print select_step360(21.2, 33.3, 5)
#print select_step360(20+21.2/60., 21+33.3/60., 5)
#print select_step360(20.5+21.2/3600., 20.5+33.3/3600., 5)
# test threshold factor
print(select_step360(20.5+11.2/3600., 20.5+53.3/3600., 5,
threshold_factor=60))
print(select_step360(20.5+11.2/3600., 20.5+53.3/3600., 5,
threshold_factor=1))
fmt = FormatterDMS()
#print fmt("left", 60, [0, -30, -60])
print(fmt("left", 600, [12301, 12302, 12303]))
print(select_step360(20.5+21.2/3600., 20.5+21.4/3600., 5))
print(fmt("left", 36000, [738210, 738215, 738220]))
print(fmt("left", 360000, [7382120, 7382125, 7382130]))
print(fmt("left", 1., [45, 46, 47]))
print(fmt("left", 10., [452, 453, 454]))
if 0:
print(select_step360(20+21.2/60., 21+33.3/60., 5))
print(select_step360(20.5+21.2/3600., 20.5+33.3/3600., 5))
print(select_step360(20+21.2/60., 20+53.3/60., 5))
###
levs, n, factor = select_step360(20.5+21.2/3600., 20.5+27.25/3600., 5)
levs = levs * 0.1
fmt = FormatterDMS()
#print fmt("left", 60, [0, -30, -60])
print(fmt("left", factor, levs))
print(select_step(-180, 180, 10, hour=False))
print(select_step(-12, 12, 10, hour=True))
fmt = FormatterDMS()
#print fmt("left", 60, [0, -30, -60])
print(fmt("left", 3600, [0, -30, -60]))
| mit |
hsuantien/scikit-learn | sklearn/ensemble/weight_boosting.py | 30 | 40648 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba <= 0] = 1e-5
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
y_predict_proba[y_predict_proba <= 0] = 1e-5
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
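# Minimal usage sketch (not part of the original module).  It uses only public
# scikit-learn APIs; the dataset and hyper-parameters are arbitrary choices.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = AdaBoostClassifier(n_estimators=50, learning_rate=1.0)
    clf.fit(iris.data, iris.target)
    print(clf.score(iris.data, iris.target))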
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
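# A small, hypothetical usage sketch of the three helpers above (the function
# name and the sample strings are illustrative only):
def _preprocessing_helpers_sketch():
    accented = u'\u00e0 b\u00eate'                        # u"à bête"
    ascii_form = strip_accents_ascii(accented)            # u"a bete"
    unicode_form = strip_accents_unicode(accented)        # u"a bete"
    plain = strip_tags(u'<b>bold</b> text')               # u" bold  text"
    return ascii_form, unicode_form, plain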
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, and these are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
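# A brief usage sketch of the hashing trick described in the class docstring
# above; the corpus and parameter values are arbitrary examples.
def _hashing_vectorizer_sketch():
    corpus = ['the quick brown fox', 'the lazy dog']
    vectorizer = HashingVectorizer(n_features=2 ** 8, norm=None,
                                   non_negative=True)
    # no fit needed: the transformer is stateless, tokens are hashed directly
    # into a fixed number of columns
    X = vectorizer.transform(corpus)   # scipy.sparse matrix, shape (2, 256)
    return X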
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
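# Worked example of the document-frequency helper above: the value for each
# term (column) is the number of documents (rows) in which it is non-zero.
# The counts matrix is made up for illustration.
def _document_frequency_sketch():
    counts = np.array([[2, 0, 1],
                       [0, 0, 3],
                       [1, 1, 0]])
    dfs = _document_frequency(sp.csr_matrix(counts))
    return dfs   # array([2, 1, 2])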
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, and these are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non-zero in more documents than high or in
fewer documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
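# A short usage sketch of the class above on a toy corpus (corpus contents
# are arbitrary examples):
def _count_vectorizer_sketch():
    corpus = ['the cat sat on the mat', 'the dog sat']
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)     # sparse counts, shape (2, 6)
    terms = vectorizer.get_feature_names()   # ['cat', 'dog', 'mat', 'on', 'sat', 'the']
    return X.toarray(), terms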
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
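# Worked example of the smoothed idf formula used in fit() above: both the
# document frequencies and the number of samples are incremented by one, and
# 1.0 is added to the log so terms occurring in every document keep a
# non-zero weight. The numbers are made up for illustration.
def _idf_formula_sketch():
    n_samples = 4
    df = np.array([4.0, 2.0, 1.0])            # document frequency of 3 terms
    idf = np.log((n_samples + 1.0) / (df + 1.0)) + 1.0
    # term in all 4 docs -> log(5/5) + 1 = 1.0 (kept, not suppressed)
    # rarer terms get larger weights: log(5/3) + 1 ~ 1.51, log(5/2) + 1 ~ 1.92
    return idf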
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, and these are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
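# A brief end-to-end usage sketch of the class above (the corpus is an
# arbitrary example): CountVectorizer and TfidfTransformer combined in one
# object, returning an l2-normalised tf-idf matrix.
def _tfidf_vectorizer_sketch():
    corpus = ['the cat sat on the mat', 'the dog sat', 'a cat and a dog']
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(corpus)   # sparse tf-idf, shape (3, n_terms)
    return X, vectorizer.get_feature_names()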
| bsd-3-clause |
luoq/qanta | classify/learn_classifiers.py | 1 | 8260 | from numpy import *
from rnn.propagation import *
import nltk.classify.util
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.linear_model import LogisticRegression
from nltk.corpus import stopwords
from collections import Counter
import cPickle
# create question dictionary such that sentences belonging to the same
# question are grouped together, {question ID: {sentence position: tree}}
def collapse_questions(train_trees, test_trees):
train_q = {}
for tree in train_trees:
if tree.qid not in train_q:
train_q[tree.qid] = {}
train_q[tree.qid][tree.dist] = tree
test_q = {}
for tree in test_trees:
if tree.qid not in test_q:
test_q[tree.qid] = {}
test_q[tree.qid][tree.dist] = tree
return train_q, test_q
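# Hypothetical illustration of the grouping performed above, using stand-in
# objects that only carry the .qid and .dist attributes the function reads:
def _collapse_questions_sketch():
    class _FakeTree(object):
        def __init__(self, qid, dist):
            self.qid, self.dist = qid, dist
    train = [_FakeTree(1, 0), _FakeTree(1, 1), _FakeTree(2, 0)]
    test = [_FakeTree(7, 0)]
    train_q, test_q = collapse_questions(train, test)
    # train_q == {1: {0: <tree>, 1: <tree>}, 2: {0: <tree>}}
    # test_q == {7: {0: <tree>}}
    return train_q, test_q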
# - full evaluation on test data, returns accuracy on all sentence positions
# within a question including full question accuracy
# - can add / remove features to replicate baseline models described in paper
# - bow_feats is unigrams, rel_feats is dependency relations
def evaluate(data_split, model_file, d, rnn_feats=True, bow_feats=False, rel_feats=False):
stop = stopwords.words('english')
vocab, rel_list, ans_list, tree_dict = \
cPickle.load(open(data_split, 'rb'))
train_trees = tree_dict['train'] + tree_dict['dev']
test_trees = tree_dict['test'] + tree_dict['devtest']
params, vocab, rel_list = cPickle.load(open(model_file, 'rb'))
(rel_dict, Wv, b, We) = params
data = [train_trees, test_trees]
# get rid of trees that the parser messed up on
for sn, split in enumerate(data):
bad_trees = []
for ind, tree in enumerate(split):
if tree.get(0).is_word == 0:
# print tree.get_words()
bad_trees.append(ind)
continue
# print 'removed', len(bad_trees)
for ind in bad_trees[::-1]:
split.pop(ind)
# adding lookup
ans_list = array([vocab.index(ans) for ans in ans_list])
for split in data:
for tree in split:
for node in tree.get_nodes():
node.vec = We[:, node.ind].reshape( (d, 1))
tree.ans_list = ans_list[ans_list != tree.ans_ind]
train_q, test_q = collapse_questions(train_trees, test_trees)
# print 'number of training questions:', len(train_q)
# print 'number of testing questions:', len(test_q)
train_feats = []
test_feats = []
test_ord = []
for tt, split in enumerate([train_q, test_q]):
# if tt == 0:
# print 'processing train'
# else:
# print 'processing test'
# for each question in the split
for qid in split:
q = split[qid]
ave = zeros( (d, 1))
words = zeros ( (d, 1))
bow = []
count = 0.
curr_ave = None
curr_words = None
# for each sentence in the question, generate features
for i in range(0, len(q)):
try:
tree = q[i]
except:
continue
curr_feats = {}
if rnn_feats:
forward_prop(None, params, tree, d, labels=False)
# features: average of hidden representations and average of word embeddings
for ex, node in enumerate(tree.get_nodes()):
if node.word not in stop:
ave += node.p_norm
words += node.vec
count += 1.
if count > 0:
curr_ave = ave / count
curr_words = words / count
featvec = concatenate([curr_ave.flatten(), curr_words.flatten()])
# add QANTA's features to the current feature set
for dim, val in ndenumerate(featvec):
curr_feats['__' + str(dim)] = val
# add unigram indicator features to the current feature set
if bow_feats:
bow += [l.word for l in tree.get_nodes()]
for word in bow:
curr_feats[word] = 1.0
# add dependency relation indicator features to the current feature set
if rel_feats:
for l in tree.get_nodes():
if len(l.parent) > 0:
par, rel = l.parent[0]
this_rel = l.word + '__' + rel + '__' + tree.get(par).word
curr_feats[this_rel] = 1.0
if tt == 0:
train_feats.append( (curr_feats, tree.ans.lower()) )
else:
test_feats.append( (curr_feats, tree.ans.lower()) )
test_ord.append(tree)
# print 'total training instances:', len(train_feats)
# print 'total testing instances:', len(test_feats)
# can modify this classifier / do grid search on regularization parameter using sklearn
classifier = SklearnClassifier(LogisticRegression(C=10))
classifier.train(train_feats)
print 'accuracy train:', nltk.classify.util.accuracy(classifier, train_feats)
print 'accuracy test:', nltk.classify.util.accuracy(classifier, test_feats)
print ''
# finer-grained evaluation, see how well QANTA does at each sentence position
pred = classifier.batch_classify([fs for (fs,l) in test_feats])
count_dists = Counter()
corr_dists = Counter()
for ind, tree in enumerate(test_ord):
curr_dist = tree.dist
count_dists[curr_dist] += 1.0
label = tree.ans
if label == pred[ind]:
corr_dists[curr_dist] += 1.0
prob_dists = {}
print 'sentence position: correctly answered at that position, total sentences at that position,',\
'accuracy'
for key in corr_dists:
prob_dists[key] = corr_dists[key] / count_dists[key]
print key, ': ', corr_dists[key], count_dists[key], prob_dists[key]
# - returns single sentence accuracy on training / validation set
# - use ONLY for hyperparameter tuning / early stopping criteria
# - this returns single sentence accuracy, not question-level accuracy
# - a logistic regression classifier is trained on the average hidden representation
# of all nodes in the tree. the full evaluation (in the evaluate method)
# - includes the average word embeddings in addition to collapsing sentences
# belonging to the same question
def validate(data, params, d):
stop = stopwords.words('english')
(rel_dict, Wv, b, L) = params
print 'validating, adding lookup'
for split in data:
for tree in split:
for node in tree.get_nodes():
node.vec = L[:, node.ind].reshape( (d, 1))
train_feats = []
val_feats = []
for tt, split in enumerate(data):
if tt == 0:
print 'processing train'
else:
print 'processing val'
for num_finished, tree in enumerate(split):
# process validation trees
forward_prop(None, params, tree, d, labels=False)
ave = zeros( (d, 1))
words = zeros ( (d, 1))
count = 0
wcount = 0
word_list = []
for ex, node in enumerate(tree.get_nodes()):
if ex != 0 and node.word not in stop:
ave += node.p_norm
count += 1
ave = ave / count
featvec = ave.flatten()
curr_feats = {}
for dim, val in ndenumerate(featvec):
curr_feats['_' + str(dim)] = val
if tt == 0:
train_feats.append( (curr_feats, tree.ans) )
else:
val_feats.append( (curr_feats, tree.ans) )
print 'training'
classifier = SklearnClassifier(LogisticRegression(C=10))
classifier.train(train_feats)
print 'predicting...'
train_acc = nltk.classify.util.accuracy(classifier, train_feats)
val_acc = nltk.classify.util.accuracy(classifier, val_feats)
return train_acc, val_acc
| mit |
derkling/trappy | trappy/cpu_power.py | 1 | 5420 | # Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process the output of the cpu_cooling devices in the current
directory's trace.dat"""
import pandas as pd
from trappy.base import Base
from trappy.run import Run
def pivot_with_labels(dfr, data_col_name, new_col_name, mapping_label):
"""Pivot a DataFrame row into columns
dfr is the DataFrame to operate on. data_col_name is the name of
the column in the DataFrame which contains the values.
new_col_name is the name of the column in the DataFrame that will
became the new columns. mapping_label is a dictionary whose keys
are the values in new_col_name and whose values are their
corresponding name in the DataFrame to be returned.
There has to be a more "pandas" way of doing this.
Example:
In [8]: dfr_in = pd.DataFrame({'cpus': ["000000f0", "0000000f", "000000f0", "0000000f"], 'freq': [1, 3, 2, 6]})
In [9]: dfr_in
Out[9]:
cpus freq
0 000000f0 1
1 0000000f 3
2 000000f0 2
3 0000000f 6
[4 rows x 2 columns]
In [10]: map_label = {"000000f0": "A15", "0000000f": "A7"}
In [11]: power.pivot_with_labels(dfr_in, "freq", "cpus", map_label)
Out[11]:
A15 A7
0 1 NaN
1 1 3
2 2 3
3 2 6
[4 rows x 2 columns]
"""
col_set = set(dfr[new_col_name])
ret_series = {}
for col in col_set:
try:
label = mapping_label[col]
except KeyError:
available_keys = ", ".join(mapping_label.keys())
error_str = '"{}" not found, available keys: {}'.format(col,
available_keys)
raise KeyError(error_str)
data = dfr[dfr[new_col_name] == col][data_col_name]
ret_series[label] = data
return pd.DataFrame(ret_series).fillna(method="pad")
def num_cpus_in_mask(mask):
"""Return the number of cpus in a cpumask"""
mask = mask.replace(",", "")
value = int(mask, 16)
return bin(value).count("1")
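# A few worked examples of the bit-count above (illustrative only):
def _num_cpus_in_mask_examples():
    # "000000f0" == 0xf0 == 0b11110000 -> 4 cpus in the mask
    assert num_cpus_in_mask("000000f0") == 4
    assert num_cpus_in_mask("0000000f") == 4
    # commas are stripped before parsing, so comma-separated masks also work
    assert num_cpus_in_mask("00000001,00000000") == 1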
class CpuOutPower(Base):
"""Process the cpufreq cooling power actor data in a ftrace dump"""
unique_word = "thermal_power_cpu_limit"
name = "cpu_out_power"
pivot = "cpus"
def __init__(self):
super(CpuOutPower, self).__init__(
unique_word=self.unique_word,
)
def get_all_freqs(self, mapping_label):
"""get a DataFrame with the maximum frequencies allowed by the governor
mapping_label must be a dictionary that maps cpumasks to name
of the cpu. Returned freqs are in MHz
"""
dfr = self.data_frame
return pivot_with_labels(dfr, "freq", "cpus", mapping_label) / 1000
Run.register_class(CpuOutPower, "thermal")
class CpuInPower(Base):
"""Process the cpufreq cooling power actor data in a ftrace dump"""
unique_word = "thermal_power_cpu_get"
name = "cpu_in_power"
pivot = "cpus"
def __init__(self):
super(CpuInPower, self).__init__(
unique_word=self.unique_word,
)
def _get_load_series(self):
"""get a pandas.Series with the aggregated load"""
dfr = self.data_frame
load_cols = [s for s in dfr.columns if s.startswith("load")]
load_series = dfr[load_cols[0]].copy()
for col in load_cols[1:]:
load_series += dfr[col]
return load_series
def get_load_data(self, mapping_label):
"""return a dataframe suitable for plot_load()
mapping_label is a dictionary mapping cluster cpumasks to labels."""
dfr = self.data_frame
load_series = self._get_load_series()
load_dfr = pd.DataFrame({"cpus": dfr["cpus"], "load": load_series})
return pivot_with_labels(load_dfr, "load", "cpus", mapping_label)
def get_normalized_load_data(self, mapping_label):
"""return a dataframe for plotting normalized load data
mapping_label should be a dictionary mapping cluster cpumasks
to labels
"""
dfr = self.data_frame
load_series = self._get_load_series()
load_series *= dfr['freq']
for cpumask in mapping_label:
num_cpus = num_cpus_in_mask(cpumask)
idx = dfr["cpus"] == cpumask
max_freq = max(dfr[idx]["freq"])
load_series[idx] = load_series[idx] / (max_freq * num_cpus)
load_dfr = pd.DataFrame({"cpus": dfr["cpus"], "load": load_series})
return pivot_with_labels(load_dfr, "load", "cpus", mapping_label)
def get_all_freqs(self, mapping_label):
"""get a DataFrame with the "in" frequencies as seen by the governor
Frequencies are in MHz
"""
dfr = self.data_frame
return pivot_with_labels(dfr, "freq", "cpus", mapping_label) / 1000
Run.register_class(CpuInPower, "thermal")
| apache-2.0 |
aubio/aubio | python/demos/demo_specdesc.py | 5 | 2580 | #! /usr/bin/env python
import sys
import numpy as np
from aubio import source, pvoc, specdesc
win_s = 512 # fft size
hop_s = win_s // 4 # hop size
if len(sys.argv) < 2:
print("Usage: %s <filename> [samplerate]" % sys.argv[0])
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
pv = pvoc(win_s, hop_s)
methods = ['default', 'energy', 'hfc', 'complex', 'phase', 'specdiff', 'kl',
'mkl', 'specflux', 'centroid', 'slope', 'rolloff', 'spread', 'skewness',
'kurtosis', 'decrease',]
all_descs = {}
o = {}
for method in methods:
cands = []
all_descs[method] = np.array([])
o[method] = specdesc(method, win_s)
total_frames = 0
downsample = 2
while True:
samples, read = s()
fftgrain = pv(samples)
#outstr = "%f" % ( total_frames / float(samplerate) )
for method in methods:
specdesc_val = o[method](fftgrain)[0]
all_descs[method] = np.append(all_descs[method], specdesc_val)
#outstr += " %f" % specdesc_val
#print(outstr)
total_frames += read
if read < hop_s: break
if 1:
print("done computing, now plotting")
import matplotlib.pyplot as plt
from demo_waveform_plot import get_waveform_plot
from demo_waveform_plot import set_xlabels_sample2time
fig = plt.figure()
plt.rc('lines',linewidth='.8')
wave = plt.axes([0.1, 0.75, 0.8, 0.19])
get_waveform_plot(filename, samplerate, block_size = hop_s, ax = wave )
wave.yaxis.set_visible(False)
wave.xaxis.set_visible(False)
all_desc_times = [ x * hop_s for x in range(len(all_descs["default"])) ]
n_methods = len(methods)
for i, method in enumerate(methods):
#ax = fig.add_subplot (n_methods, 1, i)
#plt2 = plt.axes([0.1, 0.1, 0.8, 0.65], sharex = plt1)
ax = plt.axes ( [0.1, 0.75 - ((i+1) * 0.65 / n_methods), 0.8, 0.65 / n_methods], sharex = wave )
ax.plot(all_desc_times, all_descs[method], '-', label = method)
#ax.set_ylabel(method, rotation = 0)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.axis(xmax = all_desc_times[-1], xmin = all_desc_times[0])
ax.annotate(method, xy=(-10, 0), xycoords='axes points',
horizontalalignment='right', verticalalignment='bottom',
)
set_xlabels_sample2time(ax, all_desc_times[-1], samplerate)
#plt.ylabel('spectral descriptor value')
ax.xaxis.set_visible(True)
plt.show()
| gpl-3.0 |
antoinearnoud/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/depenses_par_categories/plot_ticpe_depenses_par_decile.py | 4 | 2613 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 11:09:50 2016
@author: thomas.douenne
"""
# This script describes, for each income decile, the average annual fuel consumption,
# as well as the average spending on the TICPE (the French excise tax on fuel)
# Import general-purpose modules
from __future__ import division
import pandas
import seaborn
from pandas import concat
# Import Openfisca-specific modules
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_line
from openfisca_france_indirect_taxation.surveys import SurveyScenario
# Import a new color palette
seaborn.set_palette(seaborn.color_palette("Set2", 12))
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
# Select the variables to simulate
simulated_variables = [
'ticpe_totale',
'diesel_ticpe',
'essence_ticpe',
'depenses_carburants',
'depenses_diesel',
'depenses_essence'
]
# The goal is to build one graph covering the 3 years for each variable, hence a loop
# within a loop.
to_graph = ['ticpe totale ', 'ticpe diesel ', 'ticpe essence ', 'depenses carburants ', 'depenses diesel ',
'depenses essence ']
for element in to_graph:
depenses = None
for year in [2000, 2005, 2011]:
survey_scenario = SurveyScenario.create(year = year)
pivot_table = pandas.DataFrame()
for values in simulated_variables:
pivot_table = pandas.concat([
pivot_table,
survey_scenario.compute_pivot_table(values = [values], columns = ['niveau_vie_decile'])
])
df = pivot_table.T
df.rename(columns = {'ticpe_totale': 'ticpe totale {}'.format(year),
'diesel_ticpe': 'ticpe diesel {}'.format(year),
'essence_ticpe': 'ticpe essence {}'.format(year),
'depenses_carburants': 'depenses carburants {}'.format(year),
'depenses_diesel': 'depenses diesel {}'.format(year),
'depenses_essence': 'depenses essence {}'.format(year)},
inplace = True)
if depenses is not None:
depenses = concat(
[depenses, df[element + '{}'.format(year)]], axis = 1)
else:
depenses = df[element + '{}'.format(year)]
graph_builder_line(depenses)
| agpl-3.0 |
markomanninen/isopsephy | isopsephy/_main.py | 2 | 15586 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: main.py
import re
import pandas as pd
import search
data = {}
"""
Data mapping between roman and greek letters, isopsephy values and linguistic components
Resources:
- http://en.wikipedia.org/wiki/Greek_alphabet
- http://www.chlt.org/FirstGreekBook/JWW_FGB1.html
- http://www.webtopos.gr/eng/languages/greek/alphabet/alpha.htm
- http://www.class.uh.edu/mcl/faculty/pozzi/grnl1/intr/0.2.1.pract.vow.htm
Segments:
- vowel
- consonant
- numeral
Subsegments:
- semivowel (liquid, sibilant and γ-nasal not specified in the data table)
- double
- mute
Mutes (not specified in the data table):
{class-order} {letter}
labial-smooth π
labial-middle β
labial-rough φ
palatal-smooth κ
palatal-middle γ
palatal-rough χ
lingual-smooth τ
lingual-middle δ
lingual-rough θ
Seven vowels: α ε η ι ο υ ω (a e h i o u w)
Numerals: ϛ ϙ ϡ (6, 90, 900)
If numerals are found in greek text, their values are used in the isopsephy calculation,
but there are no corresponding roman letters for them. This is mainly because the roman alphabet
has only 26 letters, so one would need to pick two arbitrary letters (like j, v) plus some other
special character (an underscore, a digit, or anything else) for the last letter, which doesn't really make sense.
My choice was simply to keep the numerical value in the transliterated text, so ϛ, ϙ and ϡ are transliterated to 6, 90 and 900.
However, when transforming roman to greek, only letters are handled, no numbers at all.
"""
# letters from α to θ (1 to 9)
# alpha:http://en.wiktionary.org/wiki/ἄλφα
data[1] = {'greek': u'α',
'capital': u'Α',
'name': u'αλφα',
'segment': 'vowel',
'subsegment': 'short',
'roman': 'a',
'value': 1}
# beta:http://en.wiktionary.org/wiki/βῆτα
data[2] = {'greek': u'β',
'capital': u'Β',
'name': u'βητα',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 'b',
'value': 2}
# gamma:http://en.wiktionary.org/wiki/γάμμα
data[3] = {'greek': u'γ',
'capital': u'Γ',
'name': u'γαμμα',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 'g',
'value': 3}
# delta:http://en.wiktionary.org/wiki/δέλτα
data[4] = {'greek': u'δ',
'capital': u'Δ',
'name': u'δελτα',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 'd',
'value': 4}
# epsilon:http://en.wiktionary.org/wiki/epsilon
data[5] = {'greek': u'ε',
'capital': u'Ε',
'name': u'ε ψιλον',
'segment': 'vowel',
'subsegment': 'short',
'roman': 'e',
'value': 5}
# digamma/stigma/episemon/wau
# http://en.wikipedia.org/wiki/Digamma
data[6] = {'greek': u'ϛ', 'small2': u'ϝ',
'capital': u'Ϛ', 'capital2': u'Ϝ',
'name': u'διγαμμα', 'name2': u'στιγμα', 'name3': u'επισημον', 'name4': u'βαυ',
'segment': 'numeral',
#'subsegment': '',
'roman': '6',
'value': 6}
# zeta:http://en.wiktionary.org/wiki/ζῆτα
data[7] = {'greek': u'ζ',
'capital': u'Ζ',
'name': u'ζητα',
'segment': 'consonant',
'subsegment': 'double',
'roman': 'z',
'value': 7}
# eta:http://en.wiktionary.org/wiki/ἦτα
data[8] = {'greek': u'η',
'capital': u'Η',
'name': u'ητα',
'segment': 'vowel',
'subsegment': 'long',
'roman': 'h',
'value': 8}
# theta:http://en.wiktionary.org/wiki/θῆτα
data[9] = {'greek': u'θ',
'capital': u'Θ',
'name': u'θητα',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 'q',
'value': 9}
# letters from ι to ϙ (10 to 90)
# iota:http://en.wiktionary.org/wiki/ἰῶτα
data[10] = {'greek': u'ι',
'capital': u'Ι',
'name': u'ιωτα',
'segment': 'vowel',
'subsegment': 'short',
'roman': 'i',
'value': 10}
# kappa:http://en.wiktionary.org/wiki/κάππα
data[20] = {'greek': u'κ',
'capital': u'Κ',
'name': u'καππα',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 'k',
'value': 20}
# lambda:http://en.wiktionary.org/wiki/λάμβδα
data[30] = {'greek': u'λ',
'capital': u'Λ',
'name': u'λαμβδα',
'segment': 'consonant',
'subsegment': 'semivowel',
'roman': 'l',
'value': 30}
# mu:http://en.wiktionary.org/wiki/mu
data[40] = {'greek': u'μ',
'capital': u'Μ',
'name': u'μυ',
'segment': 'consonant',
'subsegment': 'semivowel',
'roman': 'm',
'value': 40}
# nu:http://en.wiktionary.org/wiki/νῦ
data[50] = {'greek': u'ν',
'capital': u'Ν',
'name': u'νυ',
'segment': 'consonant',
'subsegment': 'semivowel',
'roman': 'n',
'value': 50}
# xi:http://en.wiktionary.org/wiki/ξεῖ
data[60] = {'greek': u'ξ',
'capital': u'Ξ',
'name': u'ξει',
'segment': 'consonant',
'subsegment': 'double',
'roman': 'c',
'value': 60}
# omicron:http://en.wiktionary.org/wiki/omicron
data[70] = {'greek': u'ο',
'capital': u'Ο',
'name': u'ο μικρον',
'segment': 'vowel',
'subsegment': 'short',
'roman': 'o',
'value': 70}
# pi:http://en.wiktionary.org/wiki/πεῖ
data[80] = {'greek': u'π',
'capital': u'Π',
'name': u'πει',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 'p',
'value': 80}
# koppa:http://en.wikipedia.org/wiki/Koppa_(letter)
# http://www.webtopos.gr/eng/languages/greek/alphabet/earlyletters.htm
data[90] = {'greek': u'ϙ', 'small2': u'ϟ',
'capital': u'Ϙ', 'capital2': u'Ϟ',
'name': u'κοππα',
'segment': 'numeral',
#'subsegment': '',
'roman': '90',
'value': 90}
# letters from ρ to ϡ (100 to 900)
# rho:http://en.wiktionary.org/wiki/ῥῶ
data[100] = {'greek': u'ρ',
'capital': u'Ρ',
'name': u'ρω',
'segment': 'consonant',
'subsegment': 'semivowel',
'roman': 'r',
'value': 100}
# sigma:http://en.wiktionary.org/wiki/σίγμα
data[200] = {'greek': u'σ', 'small2': u'ϲ', 'small3': u'ς',
'capital': u'Σ', 'capital2': u'Ϲ', 'capital3': u'Σ',
'name': u'σιγμα',
'segment': 'consonant',
'subsegment': 'semivowel',
'roman': 's',
'value': 200}
# tau:http://en.wiktionary.org/wiki/tau
data[300] = {'greek': u'τ',
'capital': u'Τ',
'name': u'ταυ',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 't',
'value': 300}
# upsilon:http://en.wiktionary.org/wiki/upsilon
data[400] = {'greek': u'υ',
'capital': u'ϒ', 'capital2': u'Y',
'name': u'υ ψιλον',
'segment': 'vowel',
'subsegment': 'short',
'roman': 'u',
'value': 400}
# phi:http://en.wiktionary.org/wiki/phi
data[500] = {'greek': u'φ',
'capital': u'Φ',
'name': u'φει',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 'f',
'value': 500}
# khi, chi:http://en.wiktionary.org/wiki/chi
data[600] = {'greek': u'χ',
'capital': u'Χ',
'name': u'χει',
'segment': 'consonant',
'subsegment': 'mute',
'roman': 'x',
'value': 600}
# psi:http://en.wiktionary.org/wiki/psi
data[700] = {'greek': u'ψ',
'capital': u'Ψ',
'name': u'ψει',
'segment': 'consonant',
'subsegment': 'double',
'roman': 'y',
'value': 700}
# omega:http://en.wiktionary.org/wiki/omega
data[800] = {'greek': u'ω',
'capital': u'Ω',
'name': u'ω μεγα',
'segment': 'vowel',
'subsegment': 'long',
'roman': 'w',
'value': 800}
# sampi/disigma
# http://en.wikipedia.org/wiki/Sampi
# http://www.tlg.uci.edu/~opoudjis/unicode/other_nonattic.html#sampi
# http://www.parthia.com/fonts/sampi.htm
# http://www.jstor.org/stable/636031
data[900] = {'greek': u'ϡ', 'small2': u'ͳ',
'capital': u'Ϡ', 'capital2': u'Ͳ',
'name': u'σαμπι', 'name2': u'δισιγμα',
'segment': 'numeral',
#'subsegment': '',
'roman': '900',
'value': 900}
greek_roman_values = {}
greek_roman_letters = {}
roman_greek_letters = {}
keys = ['roman', 'greek', 'capital', 'capital2', 'small2', 'small3', 'small4']
for num, d in data.items():
for k in keys:
if d.has_key(k):
greek_roman_values[d[k]] = num
if k == 'roman':
if d['segment'] != 'numeral':
greek_roman_letters[d[k]] = d['greek']
greek_roman_letters[d[k].upper()] = d['capital']
greek_roman_values[d[k].upper()] = num
else:
if d.has_key('roman'):
if k == 'capital' or k == 'capital2':
roman_greek_letters[d[k]] = d['roman'].upper()
else:
roman_greek_letters[d[k]] = d['roman']
greek_letters = ''
keys = ['greek', 'capital', 'capital2', 'small2', 'small3', 'small4']
for num, d in data.items():
for k in keys:
if d.has_key(k):
#αΑβΒγΓδΔεΕϛϚϜϝζΖηΗθΘιΙυΥκΚϡϠͲͳλΛωΩμΜτΤνΝξΞοΟσΣϹϲςπΠχΧϙϘϞϟρΡψΨφΦ
greek_letters += d[k]
regex_greek_roman_values = re.compile('|'.join(greek_roman_values.keys()))
regex_greek_to_roman_letters = re.compile('|'.join(roman_greek_letters.keys()))
regex_roman_to_greek_letters = re.compile('|'.join(greek_roman_letters.keys()))
regex_has_numbers = re.compile('\d')
isopsephy_error_msg = "String '%s' contains unsupported characters for isopsephy calculation"
class IsopsephyException(Exception):
pass
def isopsephy(string):
"""
    The string is a Greek letter, word or sentence, OR a Roman-letter (transliterated)
    representation of it; it is converted to its numerical value letter by letter.
    This main function converts the input to unicode for the caller's convenience; the module
    logic uses the more straightforward function unicode_isopsephy internally.
"""
return unicode_isopsephy(unicode(string, encoding="utf-8"))
def unicode_isopsephy(string):
"""
String argument must be in unicode format.
"""
result = 0
# don't accept strings that contain numbers
if regex_has_numbers.search(string):
raise IsopsephyException(isopsephy_error_msg % string)
else:
num_str = regex_greek_roman_values.sub(lambda x: '%s ' % greek_roman_values[x.group()],
string)
        # don't accept strings that contain letters which haven't been converted to numbers
try:
result = sum([int(i) for i in num_str.split()])
except Exception as e:
raise IsopsephyException(isopsephy_error_msg % string)
return result
def to_roman(word):
"""
    Create a Roman-letter version of the Greek word.
    This will change all greek (primary), capital, capital2, small2, small3, and small4
    letters to Roman letters. Capitalization is honored.
"""
return regex_greek_to_roman_letters.sub(lambda x: roman_greek_letters[x.group()],
unicode(word, encoding="utf-8")).encode('utf-8')
def to_greek(word):
"""
Create a greek version of the roman letter word.
This will change a-zA-Z except j, J, v & V to the corresponding greek letters
Capital letters are honored.
"""
return regex_roman_to_greek_letters.sub(lambda x: greek_roman_letters[x.group()],
word).encode('utf-8')
names = {'name': 'name_value', 'name2': 'name_value2', 'name3': 'name_value3', 'name4': 'name_value4'}
for num, d in data.items():
for k, v in names.items():
if d.has_key(k):
d[v] = unicode_isopsephy(d[k])
# accents / diacritics for simplified greek letters
_accents_ = {}
_accents_[u'Α'] = u"Ἀ Ἄ Ἂ Ἆ ᾏ ᾈ ᾌ ᾊ ᾎ Ά Ἁ Ἅ Ἃ Ἇ Ὰ ᾼ ᾉ ᾍ ᾋ Ἀ"
_accents_[u'α'] = u"ἀ ά ἄ ᾶ ᾳ ὰ ά ἀ ἁ ἂ ἃ ἄ ἅ ἆ ἇ ᾀ ᾁ ᾂ ᾃ ᾄ ᾅ ᾆ ᾇ ᾲ ᾳ ᾴ ᾶ ᾷ"
_accents_[u'Ε'] = u"Έ Ἑ Ἕ Ἓ Ὲ Ἐ Ἔ Ἒ"
_accents_[u'ε'] = u"έ ἔ ἐ ὲ έ ἐ ἑ ἒ ἓ ἔ ἕ"
_accents_[u'Η'] = u" ᾚ ᾞ Ἠ Ἤ Ἢ Ἦ Ή Ἡ Ἥ Ἣ Ἧ Ὴ ῌ ᾙ ᾝ ᾛ ᾟ ᾘ ᾜ"
_accents_[u'η'] = u"ή ἡ ῆ ἤ ἦ ὴ ῃ ὴ ή ᾐ ᾑ ᾒ ᾓ ᾔ ᾕ ᾖ ᾗ ῂ ῃ ῄ ῆ ῇ ἠ ἡ ἢ ἣ ἤ ἥ ἦ ἧ"
_accents_[u'Ι'] = u"Ί Ἱ Ἵ Ἳ Ἷ Ὶ Ἰ Ἴ Ἲ Ἶ Ἱ"
_accents_[u'ι'] = u"ἱ ἰ ἴ ί ῖ ἷ î ì ἶ ὶ ί ῒ ΐ ῖ ῗ ἰ ἱ ἲ ἳ ἴ ἵ ἶ ἷ ϊ"
_accents_[u'Ο'] = u"Ό Ὁ Ὅ Ὃ Ὸ Ὀ Ὄ Ὂ"
_accents_[u'ο'] = u"ὁ ò ó ô ὄ ὅ ὸ ό ὀ ὁ ὂ ὃ ὄ ὅ"
_accents_[u'Ρ'] = u"Ῥ"
_accents_[u'ρ'] = u"ῥ ῤ"
_accents_[u'ϒ'] = u"Ύ Ὑ Ὕ Ὓ Ὗ Ὺ Ῡ"
_accents_[u'υ'] = u"ῦ ύ ϋ ὐ ὕ ὖ ù ὑ ὺ ύ ὐ ὑ ὒ ὓ ὔ ὕ ὖ ὗ ῠ ῡ ῢ ΰ ῦ ῧ"
_accents_[u'Ω'] = u"ᾪ ᾮ Ὠ Ὤ Ὢ Ὦ Ώ Ὡ Ὥ Ὣ Ὧ Ὼ ῼ ᾩ ᾭ ᾫ ᾯ ᾨ ᾬ"
_accents_[u'ω'] = u"ὥ ῶ ὧ ώ ὠ ῳ ᾧ ὼ ώ ὠ ὡ ὢ ὣ ὤ ὥ ὦ ὧ ᾠ ᾡ ᾢ ᾣ ᾤ ᾥ ᾦ ᾧ ῲ ῳ ῴ ῶ ῷ"
accents = {}
for letter, values in _accents_.iteritems():
for value in values.split():
accents[value] = letter
regex_roman = re.compile(r'[^abcdefghiklmnopqrstuwxyz ]+', re.IGNORECASE)
def preprocess_roman(string):
# regex to remove all special characters leaving only a-z and empty scape
# for example: a)a/atos tau1 -> aaatos tau
return regex_roman.sub('', string)
regex_greek = re.compile('|'.join(accents.keys()))
regex_greek2 = re.compile('[^%s ]+' % u"αΑβΒγΓδΔεΕϛϚϜϝζΖηΗθΘιΙυϒYκΚϡϠͲͳλΛωΩμΜτΤνΝξΞοΟσΣϹϲςπΠχΧϙϘϞϟρΡψΨφΦ")
def preprocess_greek(string):
# convert diacritics to simpler forms
string = unicode(string, encoding="utf-8")
string = regex_greek.sub(lambda x: accents[x.group()], string)
# remove all other characters
return regex_greek2.sub('', string).encode('utf-8')
def find(text, num, cumulative = False):
words = text.split()
numbers = list(map(isopsephy, words))
if cumulative:
result = []
        for indices in search.find_cumulative_indices(numbers, num):
            result.append(' '.join([words[idx] for idx in indices]))
return result
else:
        # pair each word with its own value so that duplicate values return every matching word
        return [word for word, number in zip(words, numbers) if number == num]
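# Illustrative usage sketch only (not part of the original module); guarded so that
# importing the module stays side-effect free. Exact outputs depend on the mapping
# tables defined above.
if __name__ == '__main__':
    word = 'logos'
    print(isopsephy(word))                        # letter values summed: l+o+g+o+s
    print(to_greek(word))                         # roman -> greek transliteration
    print(to_roman(to_greek(word)))               # round trip back to roman letters
    print(find('logos typos', isopsephy(word)))   # words whose value matches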
| mit |
yeti-platform/yeti | plugins/feeds/public/azorult-tracker.py | 1 | 3094 | """Azorult Tracker feeds"""
from datetime import timedelta
import logging
import pandas as pd
import numpy as np
from core.feed import Feed
from core.observables import AutonomousSystem, Ip, Hostname, Url
from core.errors import ObservableValidationError
class AzorutTracker(Feed):
"""Azorult Tracker"""
default_values = {
"frequency": timedelta(hours=12),
"name": "Azorult-Tracker",
"source": "https://azorult-tracker.net/api/last-data",
"description": "This feed contains panels of Azorult",
}
def update(self):
for index, item in self.update_json():
self.analyze(item)
def update_json(self):
r = self._make_request()
if r.status_code == 200:
res = r.json()
df = pd.DataFrame(res)
df.replace({np.nan: None}, inplace=True)
df["first_seen"] = pd.to_datetime(df["first_seen"], unit="s")
if self.last_run:
df = df[df["first_seen"] > self.last_run]
return df.iterrows()
def analyze(self, item):
context = {"source": self.name}
_id = item["_id"]
domain = item["domain"]
ip = item["ip"]
asn = item["asn"]
country_code = item["country_code"]
panel_url = item["panel_index"]
panel_path = item["panel_path"]
panel_version = item["panel_version"]
status = item["status"]
feeder = item["feeder"]
first_seen = item["first_seen"]
context["first_seen"] = first_seen
if feeder:
context["feeder"] = feeder
context["status"] = status
if item["data"]:
context["data"] = item["data"]
context["country"] = country_code
context["_id"] = _id
context["panel_version"] = panel_version
context["panel_path"] = panel_path
try:
hostname = None
url = None
ip_obs = None
asn_obs = None
if domain:
hostname = Hostname.get_or_create(value=domain)
hostname.add_context(context)
hostname.tag("azorult")
if ip:
ip_obs = Ip.get_or_create(value=ip)
ip_obs.add_context(context)
ip_obs.tag("azorult")
if panel_url:
url = Url.get_or_create(value=panel_url)
url.add_context(context)
url.tag("azorult")
if asn:
asn_obs = AutonomousSystem.get_or_create(value=asn)
asn_obs.add_context(context)
asn_obs.tag("azorult")
if hostname and ip_obs:
hostname.active_link_to(ip_obs, "IP", self.name)
if asn_obs and ip_obs:
asn_obs.active_link_to(ip_obs, "AS", self.name)
if url and hostname:
url.active_link_to(hostname, "hostname", self.name)
except ObservableValidationError as e:
logging.error(e)
except TypeError as e:
logging.error(item)
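# A standalone sketch (not part of this feed class) of the incremental-update pattern
# used in update_json() above: epoch timestamps are parsed with pandas and only rows
# newer than the previous run are kept. The sample rows and last_run value are made up.
if __name__ == "__main__":
    demo = pd.DataFrame(
        {"domain": ["a.example", "b.example"], "first_seen": [1500000000, 1700000000]}
    )
    demo["first_seen"] = pd.to_datetime(demo["first_seen"], unit="s")
    last_run = pd.Timestamp("2020-01-01")
    for index, row in demo[demo["first_seen"] > last_run].iterrows():
        print(row["domain"], row["first_seen"])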
| apache-2.0 |
Neurosim-lab/netpyne | netpyne/sim/utils.py | 1 | 28991 | """
Module for utilities related to simulation
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import next
from builtins import dict
from builtins import map
from builtins import str
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from time import time
import hashlib
import array
from numbers import Number
from collections import OrderedDict
from neuron import h# Import NEURON
from ..specs import Dict, ODict
#------------------------------------------------------------------------------
# Convert dict strings to utf8 so can be saved in HDF5 format
#------------------------------------------------------------------------------
def cellByGid(gid):
"""
Function for/to <short description of `netpyne.sim.utils.cellByGid`>
Parameters
----------
gid : <type>
<Short description of gid>
**Default:** *required*
"""
from .. import sim
cell = next((c for c in sim.net.cells if c.gid==gid), None)
return cell
#------------------------------------------------------------------------------
# Get cells list for recording based on set of conditions
#------------------------------------------------------------------------------
def getCellsList(include, returnGids=False):
"""
Function for/to <short description of `netpyne.sim.utils.getCellsList`>
Parameters
----------
include : <type>
<Short description of include>
**Default:** *required*
returnGids : bool
<Short description of returnGids>
**Default:** ``False``
**Options:** ``<option>`` <description of option>
"""
from .. import sim
if sim.nhosts > 1 and any(isinstance(cond, tuple) or isinstance(cond,list) for cond in include): # Gather tags from all cells
allCellTags = sim._gatherAllCellTags()
else:
allCellTags = {cell.gid: cell.tags for cell in sim.net.cells}
cellGids = []
cells = []
for condition in include:
if condition in ['all', 'allCells']: # all cells + Netstims
cells = list(sim.net.cells)
return cells
elif isinstance(condition, int): # cell gid
cellGids.append(condition)
elif isinstance(condition, basestring): # entire pop
cellGids.extend(list(sim.net.pops[condition].cellGids))
elif isinstance(condition, tuple) or isinstance(condition, list): # subset of a pop with relative indices
cellsPop = [gid for gid,tags in allCellTags.items() if tags['pop']==condition[0]]
cellsPop = list(set(cellsPop))
cellsPop.sort()
if isinstance(condition[1], list):
cellGids.extend([gid for i,gid in enumerate(cellsPop) if i in condition[1]])
elif isinstance(condition[1], int):
cellGids.extend([gid for i,gid in enumerate(cellsPop) if i==condition[1]])
cellGids = list(set(cellGids)) # unique values
if returnGids:
return cellGids
else:
cells = [cell for cell in sim.net.cells if cell.gid in cellGids]
return cells
#------------------------------------------------------------------------------
# Timing - Stop Watch
#------------------------------------------------------------------------------
def timing(mode, processName):
"""
Function for/to <short description of `netpyne.sim.utils.timing`>
Parameters
----------
mode : <type>
<Short description of mode>
**Default:** *required*
processName : <type>
<Short description of processName>
**Default:** *required*
"""
from .. import sim
if not hasattr(sim, 'timingData'):
sim.timingData = {}
if hasattr(sim.cfg, 'timing'):
if sim.cfg.timing:
if hasattr(sim, 'rank'):
if sim.rank == 0:
if mode == 'start':
sim.timingData[processName] = time()
elif mode == 'stop':
sim.timingData[processName] = time() - sim.timingData[processName]
else:
if mode == 'start':
sim.timingData[processName] = time()
elif mode == 'stop':
sim.timingData[processName] = time() - sim.timingData[processName]
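# Self-contained sketch (illustrative only, independent of sim/cfg) of the same
# start/stop stopwatch pattern implemented above: 'start' stores the wall-clock
# time and 'stop' replaces it with the elapsed seconds under the same label.
def _timing_demo():
    from time import time as _time
    timingData = {}
    timingData['demoTime'] = _time()                           # mode == 'start'
    sum(x * x for x in range(100000))                          # stand-in for the timed work
    timingData['demoTime'] = _time() - timingData['demoTime']  # mode == 'stop'
    return timingData['demoTime']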
#------------------------------------------------------------------------------
# Print netpyne version
#------------------------------------------------------------------------------
def version(show=True):
"""
Function for/to <short description of `netpyne.sim.utils.version`>
Parameters
----------
show : bool
<Short description of show>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
"""
from netpyne import __version__
if show:
print(__version__)
return __version__
#------------------------------------------------------------------------------
# Print github version
#------------------------------------------------------------------------------
def gitChangeset(show=True):
"""
Function for/to <short description of `netpyne.sim.utils.gitChangeset`>
Parameters
----------
show : bool
<Short description of show>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
"""
import netpyne, os, subprocess
currentPath = os.getcwd()
try:
netpynePath = os.path.dirname(netpyne.__file__)
os.chdir(netpynePath)
if show: os.system('git log -1')
# get changeset (need to remove initial tag+num and ending '\n')
#changeset = subprocess.check_output(["git", "describe"]).split('-')[2][1:-1]
changeset = subprocess.check_output(["git", "describe"], stderr=subprocess.DEVNULL).split('-')[2][1:-1]
except:
changeset = ''
os.chdir(currentPath)
return changeset
#------------------------------------------------------------------------------
# Hash function for string
#------------------------------------------------------------------------------
def hashStr(obj):
"""
Function for/to <short description of `netpyne.sim.utils.hashStr`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
"""
#return hash(obj) & 0xffffffff # hash func
    return int(hashlib.md5(obj.encode('utf-8')).hexdigest()[0:8],16) # convert the first 8 chars of the md5 hash (base 16) to an int
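# Worked sketch of the hashing above (illustrative only; the sample string is arbitrary):
# the first 8 hex digits of the md5 digest are parsed as a base-16 integer, so the same
# string always maps to the same value across runs and machines.
def _hashStr_demo(s='VecStim'):
    import hashlib as _hashlib
    digest = _hashlib.md5(s.encode('utf-8')).hexdigest()
    return int(digest[0:8], 16)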
#------------------------------------------------------------------------------
# Hash function for list of values
#------------------------------------------------------------------------------
def hashList(obj):
"""
Function for/to <short description of `netpyne.sim.utils.hashList`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
"""
return int(hashlib.md5(array.array(chr(ord('L')), obj)).hexdigest()[0:8],16)
#------------------------------------------------------------------------------
# Initialize the stim randomizer
#------------------------------------------------------------------------------
def _init_stim_randomizer(rand, stimType, gid, seed):
from .. import sim
rand.Random123(sim.hashStr(stimType), gid, seed)
#------------------------------------------------------------------------------
# Fast function to find unique elements in sequence and preserve order
#------------------------------------------------------------------------------
def unique(seq):
"""
Function for/to <short description of `netpyne.sim.utils.unique`>
Parameters
----------
seq : <type>
<Short description of seq>
**Default:** *required*
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
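# Quick illustration of the order-preserving de-duplication above (illustrative only):
# unlike set(), the order of first occurrence is kept.
def _unique_demo():
    assert unique([3, 1, 3, 2, 1]) == [3, 1, 2]
    return unique('abracadabra')  # ['a', 'b', 'r', 'c', 'd']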
#------------------------------------------------------------------------------
# Check memory
#------------------------------------------------------------------------------
def checkMemory():
"""
Function for/to <short description of `netpyne.sim.utils.checkMemory`>
"""
from .. import sim
# print memory diagnostic info
if sim.rank == 0: # and checkMemory:
import resource
print('\nMEMORY -----------------------')
print('Sections: ')
print(h.topology())
print('NetCons: ')
print(len(h.List("NetCon")))
print('NetStims:')
print(len(h.List("NetStim")))
print('\n Memory usage: %s \n' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
# import objgraph
# objgraph.show_most_common_types()
print('--------------------------------\n')
#------------------------------------------------------------------------------
# Replace item with specific key from dict or list (used to remove h objects)
#------------------------------------------------------------------------------
def copyReplaceItemObj(obj, keystart, newval, objCopy='ROOT', exclude_list=[]):
"""
Function for/to <short description of `netpyne.sim.utils.copyReplaceItemObj`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
keystart : <type>
<Short description of keystart>
**Default:** *required*
newval : <type>
<Short description of newval>
**Default:** *required*
objCopy : str
<Short description of objCopy>
**Default:** ``'ROOT'``
**Options:** ``<option>`` <description of option>
exclude_list : list
<Short description of exclude_list>
**Default:** ``[]``
**Options:** ``<option>`` <description of option>
"""
if type(obj) == list:
if objCopy=='ROOT':
objCopy = []
for item in obj:
if isinstance(item, list):
objCopy.append([])
copyReplaceItemObj(item, keystart, newval, objCopy[-1], exclude_list)
elif isinstance(item, (dict, Dict)):
objCopy.append({})
copyReplaceItemObj(item, keystart, newval, objCopy[-1], exclude_list)
else:
objCopy.append(item)
elif isinstance(obj, (dict, Dict)):
if objCopy == 'ROOT':
objCopy = Dict()
for key,val in obj.items():
if type(val) in [list]:
objCopy[key] = []
copyReplaceItemObj(val, keystart, newval, objCopy[key], exclude_list)
elif isinstance(val, (dict, Dict)):
objCopy[key] = {}
copyReplaceItemObj(val, keystart, newval, objCopy[key], exclude_list)
elif key.startswith(keystart) and key not in exclude_list:
objCopy[key] = newval
else:
objCopy[key] = val
return objCopy
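# Small sketch of what the recursive copy-and-replace above is typically used for
# (the data below is made up; in practice keys starting with 'h' hold NEURON hoc
# object references that cannot be pickled or serialized):
def _copyReplaceItemObj_demo():
    cell = {'gid': 0, 'secs': {'soma': {'hObj': '<nrn.Section>', 'geom': {'L': 20}}}}
    # roughly -> {'gid': 0, 'secs': {'soma': {'hObj': None, 'geom': {'L': 20}}}}
    return copyReplaceItemObj(cell, keystart='h', newval=None)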
#------------------------------------------------------------------------------
# Remove item with specific key from dict or list (used to remove h objects)
#------------------------------------------------------------------------------
def copyRemoveItemObj(obj, keystart, objCopy='ROOT', exclude_list=[]):
"""
Function for/to <short description of `netpyne.sim.utils.copyRemoveItemObj`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
keystart : <type>
<Short description of keystart>
**Default:** *required*
objCopy : str
<Short description of objCopy>
**Default:** ``'ROOT'``
**Options:** ``<option>`` <description of option>
exclude_list : list
<Short description of exclude_list>
**Default:** ``[]``
**Options:** ``<option>`` <description of option>
"""
if type(obj) == list:
if objCopy=='ROOT':
objCopy = []
for item in obj:
if isinstance(item, list):
objCopy.append([])
copyRemoveItemObj(item, keystart, objCopy[-1], exclude_list)
elif isinstance(item, (dict, Dict)):
objCopy.append({})
copyRemoveItemObj(item, keystart, objCopy[-1], exclude_list)
else:
objCopy.append(item)
elif isinstance(obj, (dict, Dict)):
if objCopy == 'ROOT':
objCopy = Dict()
for key,val in obj.items():
if type(val) in [list]:
objCopy[key] = []
copyRemoveItemObj(val, keystart, objCopy[key], exclude_list)
elif isinstance(val, (dict, Dict)):
objCopy[key] = {}
copyRemoveItemObj(val, keystart, objCopy[key], exclude_list)
elif key.startswith(keystart) and key not in exclude_list:
objCopy.pop(key, None)
else:
objCopy[key] = val
return objCopy
#------------------------------------------------------------------------------
# Replace item with specific key from dict or list (used to remove h objects)
#------------------------------------------------------------------------------
def replaceItemObj(obj, keystart, newval, exclude_list=[]):
"""
Function for/to <short description of `netpyne.sim.utils.replaceItemObj`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
keystart : <type>
<Short description of keystart>
**Default:** *required*
newval : <type>
<Short description of newval>
**Default:** *required*
exclude_list : list
<Short description of exclude_list>
**Default:** ``[]``
**Options:** ``<option>`` <description of option>
"""
if type(obj) == list:
for item in obj:
if type(item) in [list, dict]:
replaceItemObj(item, keystart, newval, exclude_list)
elif type(obj) == dict:
for key,val in obj.items():
if type(val) in [list, dict]:
replaceItemObj(val, keystart, newval, exclude_list)
if key.startswith(keystart) and key not in exclude_list:
obj[key] = newval
return obj
#------------------------------------------------------------------------------
# Recursively replace dict keys
#------------------------------------------------------------------------------
def replaceKeys(obj, oldkey, newkey):
"""
Function for/to <short description of `netpyne.sim.utils.replaceKeys`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
oldkey : <type>
<Short description of oldkey>
**Default:** *required*
newkey : <type>
<Short description of newkey>
**Default:** *required*
"""
if type(obj) == list:
for item in obj:
if isinstance(item, (list, dict, Dict, ODict, OrderedDict)):
replaceKeys(item, oldkey, newkey)
elif isinstance(obj, (dict, Dict, ODict, OrderedDict)):
for key in list(obj.keys()):
val = obj[key]
if isinstance(val, (list, dict, Dict, ODict, OrderedDict)):
replaceKeys(val, oldkey, newkey)
if key == oldkey:
obj[newkey] = obj.pop(oldkey)
return obj
#------------------------------------------------------------------------------
# Replace functions from dict or list with function string (so can be pickled)
#------------------------------------------------------------------------------
def replaceFuncObj(obj):
"""
Function for/to <short description of `netpyne.sim.utils.replaceFuncObj`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
"""
if type(obj) == list:
for item in obj:
if type(item) in [list, dict]:
replaceFuncObj(item)
elif type(obj) == dict:
for key,val in obj.items():
if type(val) in [list, dict]:
replaceFuncObj(val)
if 'func_name' in dir(val): #hasattr(val,'func_name'): # avoid hasattr() since it creates key in Dicts()
obj[key] = 'func' # funcSource
return obj
#------------------------------------------------------------------------------
# Replace None from dict or list with [] (so it can be saved to .mat)
#------------------------------------------------------------------------------
def replaceNoneObj(obj):
"""
Function for/to <short description of `netpyne.sim.utils.replaceNoneObj`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
"""
if type(obj) == list:# or type(obj) == tuple:
for item in obj:
if isinstance(item, (list, dict, Dict, ODict)):
replaceNoneObj(item)
elif isinstance(obj, (dict, Dict, ODict)):
for key,val in obj.items():
if isinstance(val, (list, dict, Dict, ODict)):
replaceNoneObj(val)
if val == None:
obj[key] = []
elif val == {}:
obj[key] = [] # also replace empty dicts with empty list
return obj
#------------------------------------------------------------------------------
# Replace Dict with dict and Odict with OrderedDict
#------------------------------------------------------------------------------
def replaceDictODict(obj):
"""
Function for/to <short description of `netpyne.sim.utils.replaceDictODict`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
"""
if type(obj) == list:
for item in obj:
if type(item) == Dict:
item = item.todict()
elif type(item) == ODict:
item = item.toOrderedDict()
if type(item) in [list, dict, OrderedDict]:
replaceDictODict(item)
elif type(obj) in [dict, OrderedDict, Dict, ODict]:
for key,val in obj.items():
if type(val) == Dict:
obj[key] = val.todict()
elif type(val) == ODict:
obj[key] = val.toOrderedDict()
if type(val) in [list, dict, OrderedDict]:
replaceDictODict(val)
return obj
#------------------------------------------------------------------------------
# Rename objects
#------------------------------------------------------------------------------
def rename(obj, old, new, label=None):
"""
Function for/to <short description of `netpyne.sim.utils.rename`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
old : <type>
<Short description of old>
**Default:** *required*
new : <type>
<Short description of new>
**Default:** *required*
label : <``None``?>
<Short description of label>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
"""
try:
return obj.rename(old, new, label)
except:
if type(obj) == dict and old in obj:
obj[new] = obj.pop(old) # replace
return True
else:
return False
#------------------------------------------------------------------------------
# Replace tuples with str
#------------------------------------------------------------------------------
def tupleToList(obj):
"""
Function for/to <short description of `netpyne.sim.utils.tupleToList`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
"""
if type(obj) == list:
for item in obj:
if type(item) in [list, dict]:
tupleToList(item)
elif type(item) == tuple:
obj[obj.index(item)] = list(item)
elif isinstance(obj, (dict, ODict)):
for key,val in obj.items():
if isinstance(val, (list, dict, ODict)):
tupleToList(val)
elif type(val) == tuple:
obj[key] = list(val) # also replace empty dicts with empty list
return obj
#------------------------------------------------------------------------------
# Replace Decimal with float
#------------------------------------------------------------------------------
def decimalToFloat(obj):
"""
Function for/to <short description of `netpyne.sim.utils.decimalToFloat`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
"""
from decimal import Decimal
if type(obj) == list:
for i,item in enumerate(obj):
if type(item) in [list, dict, tuple]:
decimalToFloat(item)
elif type(item) == Decimal:
obj[i] = float(item)
elif isinstance(obj, dict):
for key,val in obj.items():
if isinstance(val, (list, dict)):
decimalToFloat(val)
elif type(val) == Decimal:
obj[key] = float(val) # also replace empty dicts with empty list
return obj
#------------------------------------------------------------------------------
# Recursively remove items of an object (used to avoid mem leaks)
#------------------------------------------------------------------------------
def clearObj(obj):
"""
Function for/to <short description of `netpyne.sim.utils.clearObj`>
Parameters
----------
obj : <type>
<Short description of obj>
**Default:** *required*
"""
if type(obj) == list:
for item in obj:
if isinstance(item, (list, dict, Dict, ODict)):
clearObj(item)
del item
elif isinstance(obj, (dict, Dict, ODict)):
for key in list(obj.keys()):
val = obj[key]
if isinstance(val, (list, dict, Dict, ODict)):
clearObj(val)
del obj[key]
return obj
#------------------------------------------------------------------------------
# Support funcs to load from mat
#------------------------------------------------------------------------------
def _mat2dict(obj):
"""
A recursive function which constructs from matobjects nested dictionaries
Enforce lists for conns, synMechs and stims even if 1 element (matlab converts to dict otherwise)
"""
import scipy.io as spio
import numpy as np
if isinstance(obj, dict):
out = {}
for key in obj:
if isinstance(obj[key], spio.matlab.mio5_params.mat_struct):
if key in ['conns', 'stims', 'synMechs']:
out[key] = [_mat2dict(obj[key])] # convert to 1-element list
else:
out[key] = _mat2dict(obj[key])
elif isinstance(obj[key], np.ndarray):
out[key] = _mat2dict(obj[key])
else:
out[key] = obj[key]
elif isinstance(obj, spio.matlab.mio5_params.mat_struct):
out = {}
for key in obj._fieldnames:
val = obj.__dict__[key]
if isinstance(val, spio.matlab.mio5_params.mat_struct):
if key in ['conns', 'stims', 'synMechs']:
out[key] = [_mat2dict(val)] # convert to 1-element list
else:
out[key] = _mat2dict(val)
elif isinstance(val, np.ndarray):
out[key] = _mat2dict(val)
else:
out[key] = val
elif isinstance(obj, np.ndarray):
out = []
for item in obj:
if isinstance(item, spio.matlab.mio5_params.mat_struct) or isinstance(item, np.ndarray):
out.append(_mat2dict(item))
else:
out.append(item)
else:
out = obj
return out
#------------------------------------------------------------------------------
# Convert dict strings to utf8 so can be saved in HDF5 format
#------------------------------------------------------------------------------
def _dict2utf8(obj):
#unidict = {k.decode('utf8'): v.decode('utf8') for k, v in strdict.items()}
#print obj
import collections
if isinstance(obj, basestring):
return obj.decode('utf8')
elif isinstance(obj, collections.Mapping):
for key in list(obj.keys()):
if isinstance(key, Number):
obj[str(key).decode('utf8')] = obj[key]
obj.pop(key)
return dict(list(map(_dict2utf8, iter(obj.items()))))
elif isinstance(obj, collections.Iterable):
return type(obj)(list(map(_dict2utf8, obj)))
else:
return obj
#------------------------------------------------------------------------------
# Clear all sim objects in memory
#------------------------------------------------------------------------------
def clearAll():
"""
Function to clear all sim objects in memory
"""
from .. import sim
import numpy as np
# clean up
sim.pc.barrier()
sim.pc.gid_clear() # clear previous gid settings
# clean cells and simData in all nodes
if hasattr(sim, 'net'):
sim.clearObj([cell.__dict__ if hasattr(cell, '__dict__') else cell for cell in sim.net.cells])
if hasattr(sim, 'simData'):
if 'stims' in list(sim.simData.keys()):
sim.clearObj([stim for stim in sim.simData['stims']])
for key in list(sim.simData.keys()): del sim.simData[key]
if hasattr(sim, 'net'):
for c in sim.net.cells: del c
for p in sim.net.pops: del p
del sim.net.params
# clean cells and simData gathered in master node
if hasattr(sim, 'rank'):
if sim.rank == 0:
if hasattr(sim, 'net'):
if hasattr(sim.net, 'allCells'):
sim.clearObj([cell.__dict__ if hasattr(cell, '__dict__') else cell for cell in sim.net.allCells])
if hasattr(sim, 'allSimData'):
for key in list(sim.allSimData.keys()): del sim.allSimData[key]
if 'stims' in list(sim.allSimData.keys()):
sim.clearObj([stim for stim in sim.allSimData['stims']])
if hasattr(sim, 'net'):
for c in sim.net.allCells: del c
for p in sim.net.allPops: del p
del sim.net.allCells
if hasattr(sim, 'allSimData'):
del sim.allSimData
import matplotlib
matplotlib.pyplot.clf()
matplotlib.pyplot.close('all')
# clean rxd components
if hasattr(sim.net, 'rxd'):
sim.clearObj(sim.net.rxd)
if 'rxd' not in globals():
try:
from neuron import crxd as rxd
except:
pass
#try:
for r in rxd.rxd._all_reactions[:]:
if r():
rxd.rxd._unregister_reaction(r)
for s in rxd.species._all_species:
if s():
s().__del__()
rxd.region._all_regions = []
rxd.region._region_count = 0
rxd.region._c_region_lookup = None
rxd.species._species_counts = 0
rxd.section1d._purge_cptrs()
rxd.initializer.has_initialized = False
rxd.rxd.free_conc_ptrs()
rxd.rxd.free_curr_ptrs()
rxd.rxd.rxd_include_node_flux1D(0, None, None, None)
rxd.species._has_1d = False
rxd.species._has_3d = False
rxd.rxd._zero_volume_indices = np.ndarray(0, dtype=np.int_)
rxd.set_solve_type(dimension=1)
# clear reactions in case next sim does not use rxd
rxd.rxd.clear_rates()
for obj in rxd.__dict__:
sim.clearObj(obj)
#except:
# pass
if hasattr(sim, 'net'):
del sim.net
import gc; gc.collect()
#------------------------------------------------------------------------------
# Create a subclass of json.JSONEncoder to convert numpy types in Python types
#------------------------------------------------------------------------------
import json
import numpy as np
class NpSerializer(json.JSONEncoder):
"""
Class for/to <short description of `netpyne.sim.utils.NpSerializer`>
"""
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpSerializer, self).default(obj)
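# Typical use of the encoder above (illustrative): numpy scalars and arrays are converted
# on the fly so that json.dumps does not raise a TypeError on simulation output.
def _np_serializer_demo():
    payload = {'rate': np.float64(3.5), 'counts': np.arange(3), 'gid': np.int64(7)}
    # roughly -> '{"rate": 3.5, "counts": [0, 1, 2], "gid": 7}'
    return json.dumps(payload, cls=NpSerializer)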
| mit |
thientu/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters, and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
The thickness of the silhouette plot also gives an indication of the cluster
size. The silhouette plot for cluster 0 when ``n_clusters`` is equal to 2 is
bigger in size owing to the grouping of the 3 sub-clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more or
less of similar thickness and hence of similar sizes, as can also be verified
from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distict cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
equialgo/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
mieand/kmos | kmos/cli.py | 1 | 14497 | #!/usr/bin/env python
"""Entry point module for the command-line
interface. The kmos executable should be
on the program path, import this modules
main function and run it.
To call kmos command as you would from the shell,
use ::
kmos.cli.main('...')
Every command can be shortened as long as it is non-ambiguous, e.g. ::
kmos ex <xml-file>
instead of ::
kmos export <xml-file>
etc.
"""
# Copyright 2009-2013 Max J. Hoffmann ([email protected])
# This file is part of kmos.
#
# kmos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kmos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kmos. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import shutil
usage = {}
usage['all'] = """kmos help all
Display documentation for all commands.
"""
usage['benchmark'] = """kmos benchmark
Run 1 mio. kMC steps on model in current directory
and report runtime.
"""
usage['build'] = """kmos build
Build kmc_model.%s from *f90 files in the
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['help'] = """kmos help <command>
Print usage information for the given command.
"""
usage['export'] = """kmos export <xml-file> [<export-path>]
Take a kmos xml-file and export all generated
source code to the export-path. There try to
build the kmc_model.%s.
Additional Parameters ::
-s/--source-only
Export source only and don't build binary
-b/--backend (local_smart|lat_int)
Choose backend. Default is "local_smart".
lat_int is EXPERIMENTAL and not made
for production, yet.
-d/--debug
Turn on assertion statements in F90 code.
(Only active in compile step)
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['settings-export'] = """kmos settings-export <xml-file> [<export-path>]
Take a kmos xml-file and export kmc_settings.py
to the export-path.
"""
usage['edit'] = """kmos edit <xml-file>
Open the kmos xml-file in a GUI to edit
the model.
"""
usage['import'] = """kmos import <xml-file>
Take a kmos xml-file and open an ipython shell
with the project_tree imported as pt.
"""
usage['rebuild'] = """kmos rebuild
Export code and rebuild binary module from XML
information included in kmc_settings.py in
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
"""
usage['shell'] = """kmos shell
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['run'] = """kmos run
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['version'] = """kmos version
Print version number and exit.
"""
usage['view'] = """kmos view
Take a kmc_model.%s and kmc_settings.py in the
same directory and start to simulate the
model visually.
Additional Parameters ::
-v/--steps-per-frame <number>
Number of steps per frame
""" % ('pyd' if os.name == 'nt' else 'so')
usage['xml'] = """kmos xml
Print xml representation of model to stdout
"""
def get_options(args=None, get_parser=False):
import optparse
import os
from glob import glob
import kmos
parser = optparse.OptionParser(
'Usage: %prog [help] ('
+ '|'.join(sorted(usage.keys()))
+ ') [options]',
version=kmos.__version__)
parser.add_option('-s', '--source-only',
dest='source_only',
action='store_true',
default=False)
parser.add_option('-p', '--path-to-f2py',
dest='path_to_f2py',
default='f2py')
parser.add_option('-b', '--backend',
dest='backend',
default='local_smart')
parser.add_option('-v', '--steps-per-frame',
dest='steps_per_frame',
type='int',
default='50000')
parser.add_option('-d', '--debug',
default=False,
dest='debug',
action='store_true')
parser.add_option('-n', '--no-compiler-optimization',
default=False,
dest='no_optimize',
action='store_true')
try:
from numpy.distutils.fcompiler import get_default_fcompiler
from numpy.distutils import log
log.set_verbosity(-1, True)
fcompiler = get_default_fcompiler()
except:
fcompiler = 'gfortran'
parser.add_option('-f', '--fcompiler',
dest='fcompiler',
default=os.environ.get('F2PY_FCOMPILER', fcompiler))
if args is not None:
options, args = parser.parse_args(args.split())
else:
options, args = parser.parse_args()
if len(args) < 1:
parser.error('Command expected')
if get_parser:
return options, args, parser
else:
return options, args
def match_keys(arg, usage, parser):
"""Try to match part of a command against
the set of commands from usage. Throws
an error if not successful.
"""
possible_args = [key for key in usage if key.startswith(arg)]
if len(possible_args) == 0:
parser.error('Command "%s" not understood.' % arg)
elif len(possible_args) > 1:
parser.error(('Command "%s" ambiguous.\n'
'Could be one of %s\n\n') % (arg, possible_args))
else:
return possible_args[0]
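# Tiny standalone illustration of the prefix matching above, using a made-up command
# list (the real candidates come from the usage dict):
def _match_keys_demo():
    commands = ['benchmark', 'build', 'edit', 'export']
    assert [k for k in commands if k.startswith('ex')] == ['export']              # unique: accepted
    assert [k for k in commands if k.startswith('b')] == ['benchmark', 'build']   # ambiguous: error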
def main(args=None):
"""The CLI main entry point function.
    The optional argument args can be used to
    directly supply command line arguments, as in
    $ kmos <args>
    otherwise they are taken from sys.argv.
"""
from glob import glob
options, args, parser = get_options(args, get_parser=True)
if not args[0] in usage.keys():
args[0] = match_keys(args[0], usage, parser)
if args[0] == 'benchmark':
from sys import path
path.append(os.path.abspath(os.curdir))
nsteps = 1000000
from time import time
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
time0 = time()
try:
model.proclist.do_kmc_steps(nsteps)
except: # kmos < 0.3 had no model.proclist.do_kmc_steps
model.do_steps(nsteps)
needed_time = time() - time0
print('Using the [%s] backend.' % model.get_backend())
print('%s steps took %.2f seconds' % (nsteps, needed_time))
print('Or %.2e steps/s' % (1e6 / needed_time))
model.deallocate()
elif args[0] == 'build':
from kmos.utils import build
build(options)
elif args[0] == 'edit':
from kmos import gui
gui.main()
elif args[0] == 'settings-export':
import kmos.types
import kmos.io
from kmos.io import ProcListWriter
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = os.path.splitext(args[1])[0]
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = args[2]
project = kmos.types.Project()
project.import_xml_file(xml_file)
writer = ProcListWriter(project, export_dir)
writer.write_settings()
elif args[0] == 'export':
import kmos.types
import kmos.io
from kmos.utils import build
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = os.path.join(args[2], 'src')
project = kmos.types.Project()
project.import_xml_file(xml_file)
kmos.io.export_source(project,
export_dir,
code_generator=options.backend)
if ((os.name == 'posix'
and os.uname()[0] in ['Linux', 'Darwin'])
or os.name == 'nt') \
and not options.source_only:
os.chdir(export_dir)
build(options)
for out in glob('kmc_*'):
if os.path.exists('../%s' % out):
overwrite = raw_input(('Should I overwrite existing %s ?'
'[y/N] ') % out).lower()
if overwrite.startswith('y'):
os.remove('../%s' % out)
shutil.move(out, '..')
else:
shutil.move(out, '..')
elif args[0] == 'settings-export':
import kmos.io
pt = kmos.io.import_xml_file(args[1])
if len(args) < 3:
out_dir = os.path.splitext(args[1])[0]
print('No export path provided. Exporting kmc_settings.py to %s'
% out_dir)
args.append(out_dir)
if not os.path.exists(args[2]):
os.mkdir(args[2])
elif not os.path.isdir(args[2]):
raise UserWarning("Cannot overwrite %s; Exiting;" % args[2])
writer = kmos.io.ProcListWriter(pt, args[2])
writer.write_settings()
elif args[0] == 'help':
if len(args) < 2:
parser.error('Which help do you want?')
if args[1] == 'all':
for command in sorted(usage):
print(usage[command])
elif args[1] in usage:
print('Usage: %s\n' % usage[args[1]])
else:
arg = match_keys(args[1], usage, parser)
print('Usage: %s\n' % usage[arg])
elif args[0] == 'import':
import kmos.io
if not len(args) >= 2:
raise UserWarning('XML file name expected.')
global pt
pt = kmos.io.import_xml_file(args[1])
sh(banner='Note: pt = kmos.io.import_xml(\'%s\')' % args[1])
elif args[0] == 'rebuild':
from time import sleep
print('Will rebuild model from kmc_settings.py in current directory')
        print('Please do not interrupt the'
              ' build process, as you will most likely')
        print('lose the current model files.')
sleep(2.)
from sys import path
path.append(os.path.abspath(os.curdir))
from tempfile import mktemp
if not os.path.exists('kmc_model.so') \
and not os.path.exists('kmc_model.pyd'):
raise Exception('No kmc_model.so found.')
if not os.path.exists('kmc_settings.py'):
raise Exception('No kmc_settings.py found.')
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
tempfile = mktemp()
f = file(tempfile, 'w')
f.write(model.xml())
f.close()
for kmc_model in glob('kmc_model.*'):
os.remove(kmc_model)
os.remove('kmc_settings.py')
main('export %s -b %s .' % (tempfile, options.backend))
os.remove(tempfile)
model.deallocate()
elif args[0] in ['run', 'shell']:
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
# useful to have in interactive mode
import numpy as np
try:
from matplotlib import pyplot as plt
except:
plt = None
try:
model = KMC_Model(print_rates=False)
except:
print("Warning: could not import kmc_model!"
" Please make sure you are in the right directory")
global model, np
sh(banner='Note: model = KMC_Model(print_rates=False)')
try:
model.deallocate()
except:
print("Warning: could not deallocate model. Was is allocated?")
elif args[0] == 'version':
from kmos import VERSION
print(VERSION)
elif args[0] == 'view':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos import view
view.main(steps_per_frame=options.steps_per_frame)
elif args[0] == 'xml':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
model = KMC_Model(banner=False, print_rates=False)
print(model.xml())
else:
parser.error('Command "%s" not understood.' % args[0])
def sh(banner):
"""Wrapper around interactive ipython shell
    that factors out ipython version dependencies.
"""
from distutils.version import LooseVersion
import IPython
if hasattr(IPython, 'release'):
try:
from IPython.terminal.embed import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
try:
from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
else:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
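# The fallback chain above roughly tracks how IPython reorganised its
# embedding API: IPython.terminal.embed for IPython >= 1.0,
# IPython.frontend.terminal.embed for the 0.11-0.13 series, and the legacy
# IPython.Shell.IPShellEmbed for anything older.  A minimal call mirrors the
# usage in main(), e.g. sh(banner='Note: model = KMC_Model(print_rates=False)').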
| gpl-3.0 |
toobaz/pandas | pandas/tests/groupby/aggregate/test_aggregate.py | 2 | 18835 | """
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
from collections import OrderedDict
import functools
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
from pandas.core.base import SpecificationError
from pandas.core.groupby.generic import _maybe_mangle_lambdas
from pandas.core.groupby.grouper import Grouping
import pandas.util.testing as tm
def test_agg_regression1(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_must_agg(df):
grouped = df.groupby("A")["C"]
msg = "Must produce aggregated value"
with pytest.raises(Exception, match=msg):
grouped.agg(lambda x: x.describe())
with pytest.raises(Exception, match=msg):
grouped.agg(lambda x: x.index[:2])
def test_agg_ser_multi_key(df):
# TODO(wesm): unused
ser = df.C # noqa
f = lambda x: x.sum()
results = df.C.groupby([df.A, df.B]).aggregate(f)
expected = df.groupby(["A", "B"]).sum()["C"]
tm.assert_series_equal(results, expected)
def test_groupby_aggregation_mixed_dtype():
# GH 6212
expected = DataFrame(
{
"v1": [5, 5, 7, np.nan, 3, 3, 4, 1],
"v2": [55, 55, 77, np.nan, 33, 33, 44, 11],
},
index=MultiIndex.from_tuples(
[
(1, 95),
(1, 99),
(2, 95),
(2, 99),
("big", "damp"),
("blue", "dry"),
("red", "red"),
("red", "wet"),
],
names=["by1", "by2"],
),
)
df = DataFrame(
{
"v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
"v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
"by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
"by2": [
"wet",
"dry",
99,
95,
np.nan,
"damp",
95,
99,
"red",
99,
np.nan,
np.nan,
],
}
)
g = df.groupby(["by1", "by2"])
result = g[["v1", "v2"]].mean()
tm.assert_frame_equal(result, expected)
def test_agg_apply_corner(ts, tsframe):
# nothing to group, all NA
grouped = ts.groupby(ts * np.nan)
assert ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=pd.Index([], dtype=np.float64))
tm.assert_series_equal(grouped.sum(), exp)
tm.assert_series_equal(grouped.agg(np.sum), exp)
tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = tsframe.groupby(tsframe["A"] * np.nan)
exp_df = DataFrame(
columns=tsframe.columns, dtype=float, index=pd.Index([], dtype=np.float64)
)
tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False)
tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], check_names=False)
def test_agg_grouping_is_list_tuple(ts):
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_python_multiindex(mframe):
grouped = mframe.groupby(["A", "B"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]
)
def test_aggregate_str_func(tsframe, groupbyfunc):
grouped = tsframe.groupby(groupbyfunc)
# single series
result = grouped["A"].agg("std")
expected = grouped["A"].std()
tm.assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate("var")
expected = grouped.var()
tm.assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg(
OrderedDict([["A", "var"], ["B", "std"], ["C", "mean"], ["D", "sem"]])
)
expected = DataFrame(
OrderedDict(
[
["A", grouped["A"].var()],
["B", grouped["B"].std()],
["C", grouped["C"].mean()],
["D", grouped["D"].sem()],
]
)
)
tm.assert_frame_equal(result, expected)
def test_aggregate_item_by_item(df):
grouped = df.groupby("A")
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (df.A == "foo").sum()
bar = (df.A == "bar").sum()
K = len(result.columns)
# GH5782
# odd comparisons can result here, so cast to make easy
exp = pd.Series(
np.array([foo] * K), index=list("BCD"), dtype=np.float64, name="foo"
)
tm.assert_series_equal(result.xs("foo"), exp)
exp = pd.Series(
np.array([bar] * K), index=list("BCD"), dtype=np.float64, name="bar"
)
tm.assert_almost_equal(result.xs("bar"), exp)
def aggfun(ser):
return ser.size
result = DataFrame().groupby(df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
def test_wrap_agg_out(three_group):
grouped = three_group.groupby(["A", "B"])
def func(ser):
if ser.dtype == np.object:
raise TypeError
else:
return ser.sum()
result = grouped.aggregate(func)
exp_grouped = three_group.loc[:, three_group.columns != "C"]
expected = exp_grouped.groupby(["A", "B"]).aggregate(func)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_functions_maintain_order(df):
# GH #610
funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]
result = df.groupby("A")["C"].agg(funcs)
exp_cols = Index(["mean", "max", "min"])
tm.assert_index_equal(result.columns, exp_cols)
def test_multiple_functions_tuples_and_non_tuples(df):
# #1359
funcs = [("foo", "mean"), "std"]
ex_funcs = [("foo", "mean"), ("std", "std")]
result = df.groupby("A")["C"].agg(funcs)
expected = df.groupby("A")["C"].agg(ex_funcs)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").agg(funcs)
expected = df.groupby("A").agg(ex_funcs)
tm.assert_frame_equal(result, expected)
def test_more_flexible_frame_multi_function(df):
grouped = df.groupby("A")
exmean = grouped.agg(OrderedDict([["C", np.mean], ["D", np.mean]]))
exstd = grouped.agg(OrderedDict([["C", np.std], ["D", np.std]]))
expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
d = OrderedDict([["C", [np.mean, np.std]], ["D", [np.mean, np.std]]])
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
# be careful
result = grouped.aggregate(OrderedDict([["C", np.mean], ["D", [np.mean, np.std]]]))
expected = grouped.aggregate(
OrderedDict([["C", np.mean], ["D", [np.mean, np.std]]])
)
tm.assert_frame_equal(result, expected)
def foo(x):
return np.mean(x)
def bar(x):
return np.std(x, ddof=1)
# this uses column selection & renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
d = OrderedDict(
[["C", np.mean], ["D", OrderedDict([["foo", np.mean], ["bar", np.std]])]]
)
result = grouped.aggregate(d)
d = OrderedDict([["C", [np.mean]], ["D", [foo, bar]]])
expected = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
def test_multi_function_flexible_mix(df):
# GH #1268
grouped = df.groupby("A")
# Expected
d = OrderedDict(
[["C", OrderedDict([["foo", "mean"], ["bar", "std"]])], ["D", {"sum": "sum"}]]
)
# this uses column selection & renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = grouped.aggregate(d)
# Test 1
d = OrderedDict(
[["C", OrderedDict([["foo", "mean"], ["bar", "std"]])], ["D", "sum"]]
)
# this uses column selection & renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
# Test 2
d = OrderedDict(
[["C", OrderedDict([["foo", "mean"], ["bar", "std"]])], ["D", ["sum"]]]
)
# this uses column selection & renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
def test_groupby_agg_coercing_bools():
# issue 14873
dat = pd.DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})
gp = dat.groupby("a")
index = Index([1, 2], name="a")
result = gp["b"].aggregate(lambda x: (x != 0).all())
expected = Series([False, True], index=index, name="b")
tm.assert_series_equal(result, expected)
result = gp["c"].aggregate(lambda x: x.isnull().all())
expected = Series([True, False], index=index, name="c")
tm.assert_series_equal(result, expected)
def test_order_aggregate_multiple_funcs():
# GH 25692
df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})
res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"])
result = res.columns.levels[1]
expected = pd.Index(["sum", "max", "mean", "ohlc", "min"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.int64, np.uint64])
@pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])
def test_uint64_type_handling(dtype, how):
# GH 26310
df = pd.DataFrame({"x": 6903052872240755750, "y": [1, 2]})
expected = df.groupby("y").agg({"x": how})
df.x = df.x.astype(dtype)
result = df.groupby("y").agg({"x": how})
result.x = result.x.astype(np.int64)
tm.assert_frame_equal(result, expected, check_exact=True)
class TestNamedAggregationSeries:
def test_series_named_agg(self):
df = pd.Series([1, 2, 3, 4])
gr = df.groupby([0, 0, 1, 1])
result = gr.agg(a="sum", b="min")
expected = pd.DataFrame(
{"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=[0, 1]
)
tm.assert_frame_equal(result, expected)
result = gr.agg(b="min", a="sum")
        # sort for Python 3.5 and earlier
if compat.PY36:
expected = expected[["b", "a"]]
tm.assert_frame_equal(result, expected)
def test_no_args_raises(self):
gr = pd.Series([1, 2]).groupby([0, 1])
with pytest.raises(TypeError, match="Must provide"):
gr.agg()
# but we do allow this
result = gr.agg([])
expected = pd.DataFrame()
tm.assert_frame_equal(result, expected)
def test_series_named_agg_duplicates_raises(self):
# This is a limitation of the named agg implementation reusing
# aggregate_multiple_funcs. It could maybe be lifted in the future.
gr = pd.Series([1, 2, 3]).groupby([0, 0, 1])
with pytest.raises(SpecificationError):
gr.agg(a="sum", b="sum")
def test_mangled(self):
gr = pd.Series([1, 2, 3]).groupby([0, 0, 1])
result = gr.agg(a=lambda x: 0, b=lambda x: 1)
expected = pd.DataFrame({"a": [0, 0], "b": [1, 1]})
tm.assert_frame_equal(result, expected)
class TestNamedAggregationDataFrame:
def test_agg_relabel(self):
df = pd.DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))
expected = pd.DataFrame(
{"a_max": [1, 3], "b_max": [6, 8]},
index=pd.Index(["a", "b"], name="group"),
columns=["a_max", "b_max"],
)
tm.assert_frame_equal(result, expected)
# order invariance
p98 = functools.partial(np.percentile, q=98)
result = df.groupby("group").agg(
b_min=("B", "min"),
a_min=("A", min),
a_mean=("A", np.mean),
a_max=("A", "max"),
b_max=("B", "max"),
a_98=("A", p98),
)
expected = pd.DataFrame(
{
"b_min": [5, 7],
"a_min": [0, 2],
"a_mean": [0.5, 2.5],
"a_max": [1, 3],
"b_max": [6, 8],
"a_98": [0.98, 2.98],
},
index=pd.Index(["a", "b"], name="group"),
columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],
)
if not compat.PY36:
expected = expected[["a_98", "a_max", "a_mean", "a_min", "b_max", "b_min"]]
tm.assert_frame_equal(result, expected)
def test_agg_relabel_non_identifier(self):
df = pd.DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
result = df.groupby("group").agg(**{"my col": ("A", "max")})
expected = pd.DataFrame(
{"my col": [1, 3]}, index=pd.Index(["a", "b"], name="group")
)
tm.assert_frame_equal(result, expected)
def test_duplicate_raises(self):
# TODO: we currently raise on multiple lambdas. We could *maybe*
# update com.get_callable_name to append `_i` to each lambda.
df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
with pytest.raises(SpecificationError, match="Function names"):
df.groupby("A").agg(a=("A", "min"), b=("A", "min"))
def test_agg_relabel_with_level(self):
df = pd.DataFrame(
{"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},
index=pd.MultiIndex.from_product([["A", "B"], ["a", "b"]]),
)
result = df.groupby(level=0).agg(
aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")
)
expected = pd.DataFrame(
{"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]
)
tm.assert_frame_equal(result, expected)
def test_agg_relabel_other_raises(self):
df = pd.DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
grouped = df.groupby("A")
match = "Must provide"
with pytest.raises(TypeError, match=match):
grouped.agg(foo=1)
with pytest.raises(TypeError, match=match):
grouped.agg()
with pytest.raises(TypeError, match=match):
grouped.agg(a=("B", "max"), b=(1, 2, 3))
def test_missing_raises(self):
df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})
with pytest.raises(KeyError, match="Column 'C' does not exist"):
df.groupby("A").agg(c=("C", "sum"))
def test_agg_namedtuple(self):
df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})
result = df.groupby("A").agg(
b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")
)
expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))
tm.assert_frame_equal(result, expected)
def test_mangled(self):
df = pd.DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})
result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))
expected = pd.DataFrame(
{"b": [0, 0], "c": [1, 1]}, index=pd.Index([0, 1], name="A")
)
tm.assert_frame_equal(result, expected)
class TestLambdaMangling:
def test_maybe_mangle_lambdas_passthrough(self):
assert _maybe_mangle_lambdas("mean") == "mean"
assert _maybe_mangle_lambdas(lambda x: x).__name__ == "<lambda>"
        # don't mangle a single lambda.
assert _maybe_mangle_lambdas([lambda x: x])[0].__name__ == "<lambda>"
def test_maybe_mangle_lambdas_listlike(self):
aggfuncs = [lambda x: 1, lambda x: 2]
result = _maybe_mangle_lambdas(aggfuncs)
assert result[0].__name__ == "<lambda_0>"
assert result[1].__name__ == "<lambda_1>"
assert aggfuncs[0](None) == result[0](None)
assert aggfuncs[1](None) == result[1](None)
def test_maybe_mangle_lambdas(self):
func = {"A": [lambda x: 0, lambda x: 1]}
result = _maybe_mangle_lambdas(func)
assert result["A"][0].__name__ == "<lambda_0>"
assert result["A"][1].__name__ == "<lambda_1>"
def test_maybe_mangle_lambdas_args(self):
func = {"A": [lambda x, a, b=1: (0, a, b), lambda x: 1]}
result = _maybe_mangle_lambdas(func)
assert result["A"][0].__name__ == "<lambda_0>"
assert result["A"][1].__name__ == "<lambda_1>"
assert func["A"][0](0, 1) == (0, 1, 1)
assert func["A"][0](0, 1, 2) == (0, 1, 2)
assert func["A"][0](0, 2, b=3) == (0, 2, 3)
def test_maybe_mangle_lambdas_named(self):
func = OrderedDict(
[("C", np.mean), ("D", OrderedDict([("foo", np.mean), ("bar", np.mean)]))]
)
result = _maybe_mangle_lambdas(func)
assert result == func
def test_basic(self):
df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})
expected = pd.DataFrame(
{("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},
index=pd.Index([0, 1], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_mangle_series_groupby(self):
gr = pd.Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
result = gr.agg([lambda x: 0, lambda x: 1])
expected = pd.DataFrame({"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]})
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.")
def test_with_kwargs(self):
f1 = lambda x, y, b=1: x.sum() + y + b
f2 = lambda x, y, b=2: x.sum() + y * b
result = pd.Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
expected = pd.DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
tm.assert_frame_equal(result, expected)
result = pd.Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
expected = pd.DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
gregreen/bayestar | scripts/s82_completeness.py | 1 | 18766 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# s82_completeness.py
#
# Copyright 2013 Greg Green <greg@greg-G53JW>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os, sys, argparse
from os.path import abspath
import matplotlib as mplib
#mplib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import healpy as hp
import numpy as np
import scipy
import scipy.special
import pyfits
import h5py
import lsd
from ps import pssdsstransformall
import iterators
def flux2luptitudes(x, b):
'''
Convert flux to Luptitudes (asinh magnitudes).
Inputs:
x Flux in units of the flux at zeroeth magnitude
b Dimensionless softening parameter
'''
return -2.5 / np.log(10.) * (np.arcsinh(x / (2. * b)) + np.log(b))
def luptitudes2flux(mu, b):
'''
Convert Luptitudes (asinh magnitudes) to flux (in
units of the flux at zeroeth magnitude).
Inputs:
mu Luptitudes
b Dimensionless softening parameter
'''
return -2. * b * np.sinh(np.log(10.) / 2.5 * mu + np.log(b))
def flux2mags(x):
'''
Convert flux to magnitudes.
Input:
x Flux in units of the flux at zeroeth magnitude
'''
return -2.5 / np.log(10.) * np.log(x)
def luptitudes2mags(mu, b):
'''
Convert Luptitudes (asinh magnitudes) to standard magnitudes.
Inputs:
mu Luptitudes
b Dimensionless softening parameter
'''
x = luptitudes2flux(mu, b)
return flux2mags(x)
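# Quick round-trip sketch for the conversions above (values are illustrative;
# the default softening parameter matches the i-band value used later in
# main()).  For x >> b the asinh magnitude approaches the ordinary Pogson
# magnitude, so all three numbers below should agree closely.
def _luptitude_roundtrip_demo(x=1.e-8, b=1.4e-11):
	mu = flux2luptitudes(x, b)     # asinh magnitude of the flux
	m = luptitudes2mags(mu, b)     # back through flux to a standard magnitude
	return mu, m, flux2mags(x)     # mu ~ m = -2.5 log10(x) for x >> b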
def gc_dist(l_0, b_0, l_1, b_1):
p_0 = np.pi / 180. * l_0
t_0 = np.pi / 180. * b_0
p_1 = np.pi / 180. * l_1
t_1 = np.pi / 180. * b_1
return np.arcsin(np.sqrt(np.sin(0.5*(t_1-t_0))**2 + np.cos(t_0) * np.cos(t_1) * np.sin(0.5*(p_1-p_0))**2))
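# Note that the expression above is arcsin(sqrt(haversine)), i.e. half of the
# usual great-circle separation c = 2 * arcsin(sqrt(haversine)), so any cut
# applied to gc_dist() output (such as the one-arcsecond match radius used in
# main()) in effect admits matches out to about twice that separation.
# Tiny check sketch: two points one degree apart on the equator.
def _gc_dist_halving_demo():
	half = gc_dist(0., 0., 1., 0.)   # ~0.5 * (pi / 180)
	return half, 2. * half           # doubling recovers the true separation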
def lb2pix(nside, l, b, nest=True):
theta = np.pi/180. * (90. - b)
phi = np.pi/180. * l
return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
def pix2lb(nside, ipix, nest=True):
theta, phi = hp.pixelfunc.pix2ang(nside, ipix, nest=True)
l = 180./np.pi * phi
b = 90. - 180./np.pi * theta
return l, b
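# Round-trip sketch for the two helpers above (relies on the healpy import at
# the top of this file): the centre of a pixel maps back to the same nested
# pixel index.
def _healpix_roundtrip_demo(nside=128, ipix=0):
	l, b = pix2lb(nside, ipix, nest=True)
	return lb2pix(nside, l, b, nest=True) == ipix   # expected: True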
def adaptive_subdivide(pix_idx, nside, obj,
n_stars_max, n_stars_min=10, nside_max=2048):
# Subdivide pixel
if (len(obj) > n_stars_max[nside]) and (nside < nside_max):
sub_pix_idx = lb2pix(nside*2, obj['l'], obj['b'], nest=True)
# Check that all pixels have more than minimum # of pixels
'''
over_threshold = True
for i in xrange(4 * pix_idx, 4 * pix_idx + 4):
idx = (sub_pix_idx == i)
if np.sum(idx) < n_stars_min:
over_threshold = False
break
if not over_threshold:
return [(nside, pix_idx, obj)]
'''
# Return subdivided pixel
ret = []
for i in xrange(4 * pix_idx, 4 * pix_idx + 4):
idx = (sub_pix_idx == i)
tmp = adaptive_subdivide(i, nside*2, obj[idx],
n_stars_max, n_stars_min, nside_max)
for pix in tmp:
ret.append(pix)
return ret
else:
return [(nside, pix_idx, obj)]
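# The recursion above relies on the NESTED HEALPix convention, in which the
# four children of pixel i at resolution nside are pixels 4*i .. 4*i + 3 at
# resolution 2*nside; a pixel keeps splitting until it holds no more than
# n_stars_max[nside] stars or nside_max is reached.  Index arithmetic sketch:
def _child_pixels_demo(pix_idx):
	return list(range(4 * pix_idx, 4 * pix_idx + 4))   # e.g. 7 -> [28, 29, 30, 31]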
def mapper(qresult, bounds):
obj = lsd.colgroup.fromiter(qresult, blocks=True)
if (obj != None) and (len(obj) > 0):
		# pixel-index computation and per-pixel blocking of obj would go here;
		# this mapper appears to be an unfinished stub (pix_index and
		# block_indices are not defined above).
yield (pix_index, obj[block_indices])
def reducer(keyvalue):
pix_index, obj = keyvalue
obj = lsd.colgroup.fromiter(obj, blocks=True)
# Scale errors
err_scale = 1.3
err_floor = 0.02
obj['err'] = np.sqrt((err_scale * obj['err'])**2. + err_floor**2.)
# Find stars with bad detections
mask_zero_mag = (obj['mean'] == 0.)
mask_zero_err = (obj['err'] == 0.)
mask_nan_mag = np.isnan(obj['mean'])
mask_nan_err = np.isnan(obj['err'])
# Set errors for nondetections to some large number
obj['mean'][mask_nan_mag] = 0.
obj['err'][mask_zero_err] = 1.e10
obj['err'][mask_nan_err] = 1.e10
obj['err'][mask_zero_mag] = 1.e10
# Combine and apply the masks
#mask_detect = np.sum(obj['mean'], axis=1).astype(np.bool)
#mask_informative = (np.sum(obj['err'] > 1.e10, axis=1) < 3).astype(np.bool)
#mask_keep = np.logical_and(mask_detect, mask_informative)
#yield (pix_index, obj[mask_keep])
yield (pix_index, obj)
def subdivider(keyvalue, nside, n_stars_max, n_stars_min, nside_max):
pix_index, obj = keyvalue
obj = lsd.colgroup.fromiter(obj, blocks=True)
# Adaptively subdivide pixel
ret = adaptive_subdivide(pix_index, nside, obj,
n_stars_max, n_stars_min, nside_max)
for subpixel in ret:
sub_nside, sub_idx, sub_obj = subpixel
yield ((sub_nside, sub_idx), sub_obj)
def start_file(base_fname, index):
	f = open('%s_%d.in' % (base_fname, index), 'wb')
f.write(np.array([0], dtype=np.uint32).tostring())
return f
def to_file(f, pix_index, nside, nest, EBV, data):
close_file = False
if type(f) == str:
		f = h5py.File(f, 'a')
close_file = True
ds_name = '/photometry/pixel %d-%d' % (nside, pix_index)
ds = f.create_dataset(ds_name, data.shape, data.dtype, chunks=True,
compression='gzip', compression_opts=9)
ds[:] = data[:]
N_stars = data.shape[0]
t,p = hp.pixelfunc.pix2ang(nside, pix_index, nest=nest)
t *= 180. / np.pi
p *= 180. / np.pi
gal_lb = np.array([p, 90. - t], dtype='f8')
att_f8 = np.array([EBV], dtype='f8')
att_u8 = np.array([pix_index], dtype='u8')
att_u4 = np.array([nside, N_stars], dtype='u4')
att_u1 = np.array([nest], dtype='u1')
ds.attrs['healpix_index'] = att_u8[0]
ds.attrs['nested'] = att_u1[0]
ds.attrs['nside'] = att_u4[0]
#ds.attrs['N_stars'] = N_stars
ds.attrs['l'] = gal_lb[0]
ds.attrs['b'] = gal_lb[1]
ds.attrs['EBV'] = att_f8[0]
if close_file:
f.close()
return gal_lb
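# Minimal read-back sketch for the layout written above (assumes a file
# produced by to_file(); dataset names follow '/photometry/pixel <nside>-<index>'):
def _read_pixel_demo(fname, nside, pix_index):
	with h5py.File(fname, 'r') as f:
		ds = f['/photometry/pixel %d-%d' % (nside, pix_index)]
		return ds.attrs['l'], ds.attrs['b'], ds.attrs['EBV'], ds[:]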
def main():
parser = argparse.ArgumentParser(
prog='s82_completeness.py',
description='Estimate PS1 completeness by comparison with deep SDSS Stripe 82 photometry.',
add_help=True)
#parser.add_argument('out', type=str, help='Filename for query output.')
parser.add_argument('-b', '--bounds', type=float, nargs=4, default=None,
help='Restrict pixels to region enclosed by: RA_min, RA_max, Dec_min, Dec_max.')
parser.add_argument('--n-bands', type=int, default=4,
help='Min. # of PS1 passbands with detection.')
parser.add_argument('--n-det', type=int, default=4,
help='Min. # of PS1 detections.')
parser.add_argument('-w', '--n-workers', type=int, default=5,
help='# of workers for LSD to use.')
if 'python' in sys.argv[0]:
offset = 2
else:
offset = 1
args = parser.parse_args(sys.argv[offset:])
mplib.rc('text', usetex=True)
n_pointlike = args.n_bands - 1
if n_pointlike == 0:
n_pointlike = 1
# Determine the query bounds
query_bounds = None
if args.bounds != None:
query_bounds = lsd.bounds.rectangle(args.bounds[0], args.bounds[2],
args.bounds[1], args.bounds[3],
coordsys='equ')
query_bounds = lsd.bounds.make_canonical(query_bounds)
# Set up the query
db = lsd.DB(os.environ['LSD_DB'])
query = ("select s82coadd.l as l, s82coadd.b as b, "
"s82coadd.ra as ra_s82, "
"s82coadd.dec as dec_s82, "
"s82coadd.psfcounts as s82_counts, "
"s82coadd.psfcountserr as s82_counts_err, "
"ucal_magsqx_noref.ra as ra_ps1, "
"ucal_magsqx_noref.dec as dec_ps1, "
"ucal_magsqx_noref.mean as ps1_mean, "
"ucal_magsqx_noref.err as ps1_err, "
"ucal_magsqx_noref.mean_ap as ps1_mean_ap, "
"ucal_magsqx_noref.maglimit as ps1_maglim, "
"ucal_magsqx_noref.nmag_ok as ps1_nmag_ok "
"from s82coadd, ucal_magsqx_noref(outer, matchedto=s82coadd, nmax=1, dmax=30) "
"where (s82coadd.objc_type == 6)")
query = db.query(query)
# Execute query
rows = query.fetch(bounds=query_bounds, nworkers=args.n_workers)
# Transform from luptitudes to AB magnitudes
b_s82 = [1.0e-11, 0.43e-11, 0.81e-11, 1.4e-11, 3.7e-11] # Stripe-82 ugriz softening parameters
sdss_bands = 'ugriz'
dtype = [(b, 'f8') for b in sdss_bands]
s82_mags = np.empty(rows['s82_counts'].shape[0], dtype=dtype)
for band, (name, b) in enumerate(zip(sdss_bands, b_s82)):
s82_mags[name][:] = luptitudes2mags(rows['s82_counts'][:, band], b)
# Transform SDSS magnitudes to synthetic PS1 magnitudes
s82_ps1_mags = pssdsstransformall(s82_mags)
# Filter objects which do not have 5-band Stripe-82 detections
idx = np.isfinite(s82_ps1_mags) & (s82_ps1_mags > 0.) & (s82_ps1_mags < 30.)
idx = np.all(idx, axis=1)
print 'Stripe-82 objects filtered: %d of %d' % (np.sum(~idx), len(idx))
rows = rows[idx]
s82_mags = s82_mags[idx]
s82_ps1_mags = s82_ps1_mags[idx]
# Which Stripe-82 objects have PS1 matches
ps1_mags = rows['ps1_mean']
ps1_mask = (ps1_mags > 0.)
match_dist = gc_dist(rows['ra_s82'], rows['dec_s82'], rows['ra_ps1'], rows['dec_ps1'])
max_dist = 1. * np.pi / 180. / 3600. # One arcsecond
idx = (match_dist < max_dist)
for band in xrange(5):
ps1_mask[:, band] = ps1_mask[:, band] & idx
'''
ps1_row_mask = ( (rows['ps1_nmag_ok'][:,0] > 0)
& (np.sum(rows['ps1_nmag_ok'] > 0, axis=1) >= args.n_bands)
& (np.sum(rows['ps1_nmag_ok'], axis=1) >= args.n_det)
& (np.sum(rows['ps1_mean'] - rows['ps1_mean_ap'] < 0.1, axis=1) >= n_pointlike)
)
for band in xrange(ps1_mask.shape[1]):
ps1_mask[:, band] = ps1_mask[:, band] & ps1_row_mask
'''
print 'Stripe-82 detections:', len(rows)
print 'PS1 griz matches:', np.sum(ps1_mask, axis=0)
# PS1 completeness in each magnitude bin
mag_bins = np.linspace(-2., 4., 60)
bin_min = mag_bins[:-1]
bin_max = mag_bins[1:]
mag_bin_center = 0.5 * (bin_min + bin_max)
'''
ps1_pct = np.empty((mag_bins.size-1, 5), dtype='f8')
for band in xrange(5):
for i, (mag_min, mag_max) in enumerate(zip(bin_min, bin_max)):
idx = (s82_ps1_mags[:, band] >= mag_min) & (s82_ps1_mags[:, band] < mag_max)
ps1_pct[i, band] = np.sum(ps1_mask[idx, band], axis=0) / float(np.sum(idx))
'''
# PS1 completeness in different spatial pixels
pix_idx = lb2pix(128, rows['ra_s82'], rows['dec_s82'])
pix_idx_unique = np.unique(pix_idx)
n_pix = pix_idx_unique.size
ps1_pct_area = np.empty((n_pix, mag_bins.size-1, 5), dtype='f8')
maglim_area = np.empty((n_pix, 5), dtype='f8')
n_stars = np.empty(n_pix)
for k, p_idx in enumerate(pix_idx_unique):
in_pix = (pix_idx == p_idx)
n_stars[k] = np.sum(in_pix)
print '%d stars in pixel %d.' % (n_stars[k], k + 1)
s82_ps1_tmp = s82_ps1_mags[in_pix]
ps1_mask_tmp = ps1_mask[in_pix]
for band in xrange(5):
maglim = rows['ps1_maglim'][:, band]
idx = (maglim > 0.)
maglim_area[k, band] = np.median(maglim[idx])
maglim = maglim_area[k, band]
for i, (mag_min, mag_max) in enumerate(zip(bin_min, bin_max)):
idx = (s82_ps1_tmp[:, band] - maglim >= mag_min) & (s82_ps1_tmp[:, band] - maglim < mag_max)
ps1_pct_area[k, i, band] = np.sum(ps1_mask_tmp[idx, band], axis=0) / float(np.sum(idx))
idx = (n_stars > 1000)
ps1_pct_area = np.percentile(ps1_pct_area[idx], [15.87, 50., 84.13], axis=0)
ps1_pct_area = np.array(ps1_pct_area)
idx = (ps1_pct_area[0, :, :] < 1.e-10)
ps1_pct_area[0, idx] = 1.e-10 * (0.5 + np.random.random(ps1_pct_area[0, idx].shape))
idx = (ps1_pct_area[1, :, :] < 1.e-9)
ps1_pct_area[1, idx] = 1.e-9 * (0.5 + np.random.random(ps1_pct_area[0, idx].shape))
idx = (ps1_pct_area[2, :, :] < 1.e-8)
ps1_pct_area[2, idx] = 1.e-8 * (0.5 + np.random.random(ps1_pct_area[0, idx].shape))
# PS1 completeness as a function of mag - maglimit
mag_diff_bins = np.linspace(-6., 4., 100)
bin_min = mag_diff_bins[:-1]
bin_max = mag_diff_bins[1:]
diff_bin_center = 0.5 * (bin_min + bin_max)
ps1_pct_diff = np.empty((mag_diff_bins.size-1, 5), dtype='f8')
s82_ps1_diff = s82_ps1_mags - rows['ps1_maglim']
for band in xrange(5):
idx_maglim = (rows['ps1_maglim'][:, band] > 0.)
print band, np.median(rows['ps1_maglim'][idx_maglim, band])
for i, (mag_min, mag_max) in enumerate(zip(bin_min, bin_max)):
idx_bin = (s82_ps1_diff[:, band] >= mag_min) & (s82_ps1_diff[:, band] < mag_max)
idx = idx_maglim & idx_bin
ps1_pct_diff[i, band] = np.sum(ps1_mask[idx, band], axis=0) / float(np.sum(idx))
# Completeness parameterization
tmp_pct = ps1_pct_area[1, -20:, :]
idx = (tmp_pct > 1.e-5) & np.isfinite(tmp_pct)
comp_floor = np.median(tmp_pct[idx])
print 'Completeness floor: %.2g' % comp_floor
#dm_0 = [0.15, 0.23, 0.17, 0.12, 0.15]
dm_0 = [0.16, 0.16, 0.16, 0.16, 0.16]
dm_1 = 0.20
'''
comp_fit = 0.5 * (1. - scipy.special.erf((dm - 0.15) / 0.47))
ax.plot(dm, comp_fit, lw=2., alpha=0.3, c='orange')
comp_fit = (1. - comp_floor) * comp_fit + comp_floor
'''
#comp_fit = 1. / (1. + np.exp((dm - 0.13) / 0.20))
#comp_fit_floor = (1. - comp_floor) * comp_fit + comp_floor
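	# In other words, the model fitted below is a logistic rolloff in
	# dm = m - maglim with an additive floor:
	#
	#     C(dm) = c_floor + (1 - c_floor) / (1 + exp((dm - dm_0) / dm_1))
	#
	# dm_0 places the midpoint of the (un-floored) logistic relative to the
	# nominal magnitude limit, and dm_1 sets the width of the rolloff.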
# Plot completeness
fig = plt.figure(figsize=(9,6), dpi=150)
band_names = ['g', 'r', 'i', 'z', 'y']
plt_colors = ['c', 'b', 'g', 'r', 'gray']
for band, (name, color) in enumerate(zip(band_names, plt_colors)):
maglim = rows['ps1_maglim'][:, band]
idx = (maglim > 0.)
maglim = np.percentile(maglim[idx], [15.87, 50., 84.13])
print 'maglim_%d: %.2f + %.2f - %.2f' % (band,
maglim[1],
maglim[1] - maglim[0],
maglim[2] - maglim[1])
ax = fig.add_subplot(2, 3, band+1)
ax.axvline(x=0., c='k', ls=':', lw=1., alpha=0.2)
ax.axhline(y=1., c='k', ls=':', lw=1., alpha=0.2)
pos = np.all(ps1_pct_area[:, :, band] > 0, axis=0)
ax.fill_between(mag_bin_center,
ps1_pct_area[0, :, band],
ps1_pct_area[2, :, band],
where=pos,
color=color,
edgecolor=color,
alpha=0.5,
label=r'$%s_{\mathrm{P1}}$' % name)
ax.semilogy(mag_bin_center, ps1_pct_area[1, :, band],
c=color, alpha=0.5, label=r'$%s_{\mathrm{P1}}$' % name)
tmp_pct = ps1_pct_area[1, -25:, band]
idx = (tmp_pct > 1.e-5) & np.isfinite(tmp_pct)
comp_floor = np.median(tmp_pct[idx])
print 'Completeness floor %d: %.2g' % (band, comp_floor)
comp_floor = 0.01
dm = np.linspace(-2., 4., 1000)
comp_fit = 1. / (1. + np.exp((dm - dm_0[band]) / dm_1))
comp_fit_floor = (1. - comp_floor) * comp_fit + comp_floor
ax.semilogy(dm, comp_fit, lw=2., alpha=0.5,
c='k', ls='-',
label=r'$\mathrm{Fit}$')
ax.semilogy(dm, comp_fit_floor, lw=1., alpha=0.25,
c='k', ls='--')
ax.set_yscale('log')
ax.set_xlim(-1., 2.)
ax.set_ylim(0.005, 1.5)
if band < 3:
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.set_xticks([-0.5, 0.0, 0.5, 1.0, 1.5])
ax.set_xlabel(r'$\Delta %s_{\mathrm{P1}} \ (\mathrm{mag})$' % name, fontsize=16)
ax.yaxis.set_major_formatter(FormatStrFormatter(r'$%.2f$'))
if band not in [0, 3]:
ax.set_yticklabels([])
fig.text(0.06, 0.5, r'$\mathrm{PS1 \ Completeness}$',
fontsize=16, va='center', ha='center', rotation='vertical')
# Legend
ax = fig.add_subplot(2, 3, 6)
ax.fill_between([0.1, 0.35], [0.7, 0.7], [0.8, 0.8],
color='orange', edgecolor='orange', alpha=0.5)
ax.plot([0.1, 0.35], [0.75, 0.75], color='orange', alpha=0.5)
ax.text(0.45, 0.75, r'$1 \sigma \ \mathrm{Region}$',
ha='left', va='center', fontsize=16)
ax.plot([0.1, 0.35], [0.5, 0.5], lw=2., alpha=0.5,
c='k', ls='-')
ax.text(0.45, 0.5, r'$\mathrm{Fit}$',
ha='left', va='center', fontsize=16)
ax.plot([0.1, 0.35], [0.3, 0.3], lw=1., alpha=0.25,
c='k', ls='--')
ax.text(0.45, 0.3, r'$\mathrm{Fit \ with \ floor}$',
ha='left', va='center', fontsize=16)
ax.set_xlim([0., 1.])
ax.set_ylim([0.05, 1.05])
ax.axis('off')
fig.subplots_adjust(top=0.85, bottom=0.13,
left=0.13, right=0.95,
hspace=0., wspace=0.)
fig.savefig('tmp.png', dpi=300)
#ax.set_title(r'$\mathrm{Absolute}$', fontsize=16)
#ax.set_xlabel(r'$\mathrm{\Delta m_{P1} \ (mag)}$', fontsize=16)
#ax.set_xlabel(r'$\mathrm{Stripe-82 \ asinh \ mag}$', fontsize=14)
#ax.set_ylabel(r'$\mathrm{PS1 \ Completeness}$', fontsize=16)
'''
ax = fig.add_subplot(2,1,2)
band_names = ['g', 'r', 'i', 'z', 'y']
plt_colors = ['c', 'b', 'g', 'r', 'gray']
for band, (name, color) in enumerate(zip(band_names, plt_colors)):
ax.semilogy(diff_bin_center, ps1_pct_diff[:, band], c=color, label=r'$%s$' % name)
ax.set_title(r'$\mathrm{Relative}$', fontsize=16)
ax.set_xlabel(r'$\mathrm{mag \ - \ maglim}$', fontsize=14)
#ax.set_xlabel(r'$\mathrm{Stripe-82 \ asinh \ mag}$', fontsize=14)
ax.set_ylabel(r'$\mathrm{PS1 \ Completeness}$', fontsize=14)
ax.legend()
ax.set_xlim(-1., 2.)
ax.set_ylim(0.005, 1.5)
fig.savefig('tmp.png', dpi=200)
'''
# Histograms of synthetic PS1 vs real PS1 magnitudes
fig = plt.figure()
diff = ps1_mags - s82_ps1_mags
for band, name in enumerate(band_names):
ax = fig.add_subplot(1, 5, band+1)
ax.hist(diff[ps1_mask[:, band], band], bins=50)
ax.set_title(r'$\Delta %s$' % name, fontsize=16)
ax.set_xlabel(r'$\Delta %s \ (\mathrm{mag})$' % name, fontsize=14)
# Color-color diagrams of synthetic PS1 photometry
fig = plt.figure()
for c2 in xrange(4):
for c1 in xrange(c2):
ax = fig.add_subplot(3, 4, 1 + 3*c1 + c2)
diff_1 = s82_ps1_mags[:, c1] - s82_ps1_mags[:, c1+1]
diff_2 = s82_ps1_mags[:, c2] - s82_ps1_mags[:, c2+1]
ax.scatter(diff_1, diff_2, s=1.5, alpha=0.05, edgecolor='none')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ax.set_xlim(xlim[1], xlim[0])
ax.set_ylim(ylim[1], ylim[0])
plt.show()
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
nbeaver/numpy | numpy/core/function_base.py | 23 | 6891 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
        Only returned if `retstep` is True.
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
delta = stop - start
if num > 1:
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y = y * delta
else:
# One might be tempted to use faster, in-place multiplication here,
# but this prevents step from overriding what class is produced,
# and thus prevents, e.g., use of Quantities; see gh-7142.
y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
| bsd-3-clause |
treycausey/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 21 | 4761 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
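# A minimal sketch of how the OOB curve is turned into an iteration estimate
# further down (assumes a classifier fitted with subsample < 1.0, so that
# ``oob_improvement_`` is populated):
#
#     cum_oob_loss = -np.cumsum(clf.oob_improvement_)   # pessimistic loss proxy
#     best_n_oob = np.argmin(cum_oob_loss) + 1          # 1-based iteration index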
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
Winand/pandas | pandas/tests/groupby/test_filters.py | 15 | 24350 | # -*- coding: utf-8 -*-
from __future__ import print_function
from numpy import nan
import pytest
from pandas import Timestamp
from pandas.core.index import MultiIndex
from pandas.core.api import DataFrame
from pandas.core.series import Series
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.compat import (lmap)
from pandas import compat
import pandas.core.common as com
import numpy as np
import pandas.util.testing as tm
import pandas as pd
class TestGroupByFilter(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_series(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
def test_filter_single_column_df(self):
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
expected)
def test_filter_mixed_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.loc[[]])
def test_filter_out_no_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df(self):
# GH12768
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
pytest.raises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_with_axis_in_groupby(self):
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = pd.DataFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.groupby(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
f = lambda x: x
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.
transform(lambda x: len(x) < N / 10).astype('bool')]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.loc[[]]
assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = df.groupby(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_series_equal(df['A'], result)
result = grouped['A'].transform(len)
expected = Series([2, 3, 2, 3, 3], name='A')
assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(df, result)
result = grouped.transform('sum')
expected = DataFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index(self):
# GH4620
t0 = Timestamp('2013-09-30 00:05:00')
t1 = Timestamp('2013-10-30 00:05:00')
t2 = Timestamp('2013-11-30 00:05:00')
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index(self):
# GH4620
index = list('bbbcbbab')
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols(self):
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
# previously didn't have access to col A #????
filt = g.filter(lambda x: x['A'].sum() == 2)
assert_frame_equal(filt, df.iloc[[0, 1]])
def test_filter_enforces_scalarness(self):
df = pd.DataFrame([
['best', 'a', 'x'],
['worst', 'b', 'y'],
['best', 'c', 'x'],
['best', 'd', 'y'],
['worst', 'd', 'y'],
['worst', 'd', 'y'],
['best', 'd', 'z'],
], columns=['a', 'b', 'c'])
with tm.assert_raises_regex(TypeError,
'filter function returned a.*'):
df.groupby('c').filter(lambda g: g['a'] == 'best')
def test_filter_non_bool_raises(self):
df = pd.DataFrame([
['best', 'a', 1],
['worst', 'b', 1],
['best', 'c', 1],
['best', 'd', 1],
['worst', 'd', 1],
['worst', 'd', 1],
['best', 'd', 1],
], columns=['a', 'b', 'c'])
with tm.assert_raises_regex(TypeError,
'filter function returned a.*'):
df.groupby('a').filter(lambda g: g.c.mean())
def test_filter_dropna_with_empty_groups(self):
# GH 10780
data = pd.Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
        grouped = data.groupby(level=0)
        result_false = grouped.filter(lambda x: x.mean() > 1, dropna=False)
expected_false = pd.Series([np.nan] * 9,
index=np.repeat([1, 2, 3], 3))
tm.assert_series_equal(result_false, expected_false)
        result_true = grouped.filter(lambda x: x.mean() > 1, dropna=True)
expected_true = pd.Series(index=pd.Index([], dtype=int))
tm.assert_series_equal(result_true, expected_true)
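        # Note (added for clarity, not part of the original tests): with
        # dropna=True the failing groups are removed entirely, so the result
        # keeps only the surviving rows; with dropna=False the original index
        # is preserved and the values of failing groups are replaced by NaN,
        # which is exactly what expected_false / expected_true show above.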
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
tups = com._asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in compat.iteritems(expected):
assert (result[k] == v)
| bsd-3-clause |
moutai/scikit-learn | sklearn/metrics/cluster/supervised.py | 12 | 30446 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
max_n_classes : int, optional (default=5000)
        Maximal number of classes handled for contingency_matrix.
        This helps to avoid a MemoryError with regression targets
for mutual_information.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
if n_classes > max_n_classes:
raise ValueError("Too many classes for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
if n_clusters > max_n_classes:
raise ValueError("Too many clusters for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
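# Illustrative sketch (added here, not part of scikit-learn): a tiny worked
# example of the helper above. For labels_true = [0, 0, 1, 1] and
# labels_pred = [0, 1, 1, 1] the contingency matrix is
#     [[1, 1],
#      [0, 2]]
# i.e. entry (i, j) counts samples with true class i and predicted cluster j;
# row sums recover class sizes, column sums recover cluster sizes.
def _demo_contingency_matrix():  # hypothetical helper, for illustration only
    return contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])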
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even after permuting labels
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
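# Illustrative sketch (added here, not part of scikit-learn), tracing the
# arithmetic above for the docstring example labels_true = [0, 0, 1, 2],
# labels_pred = [0, 0, 1, 1]: the contingency matrix is [[2, 0], [0, 1], [0, 1]],
# so sum_comb_c = 1, sum_comb_k = 2, sum_comb = 1 and comb(4, 2) = 6, giving
# ARI = (1 - 1/3) / (1.5 - 1/3) = 4/7 ~ 0.571, matching the 0.57... doctest.
def _demo_adjusted_rand_score():  # hypothetical helper, for illustration only
    return adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])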
def homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes=5000):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred,
max_n_classes=max_n_classes)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
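# Illustrative sketch (added here, not part of scikit-learn): assigning every
# sample to a single cluster gives MI = 0 and entropy(labels_pred) = 0, so the
# function above returns homogeneity 0.0, completeness 1.0 (via the `else 1.0`
# convention) and V-measure 0.0.
def _demo_homogeneity_completeness_v_measure():  # hypothetical, illustration only
    return homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 0, 0])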
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
    Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[0]
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[1]
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None,
max_n_classes=5000):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
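# Illustrative sketch (added here, not part of scikit-learn): for two identical
# two-cluster labelings the contingency matrix is [[2, 0], [0, 2]], the joint
# probabilities are 0.5 on the diagonal, and the sum above evaluates to
# 2 * 0.5 * log(0.5 / (0.5 * 0.5)) = log(2) ~ 0.693 nats.
def _demo_mutual_info_score():  # hypothetical helper, for illustration only
    return mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])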
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
    ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
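# Illustrative sketch (added here, not part of scikit-learn): entropies are in
# nats, e.g. for two equally sized labels pi = [2, 2] and pi_sum = 4, so the
# expression above reduces to -2 * 0.5 * (log(2) - log(4)) = log(2) ~ 0.693.
def _demo_entropy():  # hypothetical helper, for illustration only
    return entropy([0, 0, 1, 1])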
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/linear_model/ransac.py | 12 | 19391 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
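# Illustrative sketch (added here, not part of scikit-learn): with 80 inliers
# out of 100 samples, min_samples = 2 and probability = 0.99, the inlier ratio
# is 0.8, nom = 0.01, denom = 1 - 0.8 ** 2 = 0.36, and
# ceil(log(0.01) / log(0.36)) = 5 trials are required.
def _demo_dynamic_max_trials():  # hypothetical helper, for illustration only
    return _dynamic_max_trials(80, 100, 2, 0.99)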
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
max_skips : int, optional
Maximum number of iterations that can be skipped due to finding zero
inliers or invalid data defined by ``is_data_valid`` or invalid models
defined by ``is_model_valid``.
.. versionadded:: 0.19
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
.. deprecated:: 0.18
``residual_metric`` is deprecated from 0.18 and will be removed in
0.20. Use ``loss`` instead.
loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : int, RandomState instance or None, optional, default None
        The generator used to select random sub-samples. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
n_skips_no_inliers_ : int
Number of iterations skipped due to finding zero inliers.
.. versionadded:: 0.19
n_skips_invalid_data_ : int
Number of iterations skipped due to invalid data defined by
``is_data_valid``.
.. versionadded:: 0.19
n_skips_invalid_model_ : int
Number of iterations skipped due to an invalid model defined by
``is_model_valid``.
.. versionadded:: 0.19
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100, max_skips=np.inf,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.max_skips = max_skips
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' was deprecated in version 0.18 and "
"will be removed in version 0.20. Use 'loss' instead.",
DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
        try:  # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 1
score_best = -np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
self.n_skips_no_inliers_ = 0
self.n_skips_invalid_data_ = 0
self.n_skips_invalid_model_ = 0
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
self.n_trials_ = 0
max_trials = self.max_trials
while self.n_trials_ < max_trials:
self.n_trials_ += 1
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
break
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
self.n_skips_invalid_data_ += 1
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
self.n_skips_invalid_model_ += 1
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
self.n_skips_no_inliers_ += 1
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
max_trials = min(
max_trials,
_dynamic_max_trials(n_inliers_best, n_samples,
min_samples, self.stop_probability))
# break if sufficient number of inliers or score is reached
if n_inliers_best >= self.stop_n_inliers or \
score_best >= self.stop_score:
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
if ((self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips):
raise ValueError(
"RANSAC skipped more iterations than `max_skips` without"
" finding a valid consensus set. Iterations were skipped"
" because each randomly chosen sub-sample failed the"
" passing criteria. See estimator attributes for"
" diagnostics (n_skips*).")
else:
raise ValueError(
"RANSAC could not find a valid consensus set. All"
" `max_trials` iterations were skipped because each"
" randomly chosen sub-sample failed the passing criteria."
" See estimator attributes for diagnostics (n_skips*).")
else:
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
warnings.warn("RANSAC found a valid consensus set but exited"
" early due to skipping more iterations than"
" `max_skips`. See estimator attributes for"
" diagnostics (n_skips*).",
UserWarning)
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
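# Illustrative usage sketch (added here, not part of scikit-learn); the data
# below is made up purely to show the fit / predict / inlier_mask_ workflow.
def _demo_ransac_regressor():  # hypothetical helper, for illustration only
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 10, size=(100, 1))
    y = 3.0 * X.ravel() + rng.normal(scale=0.5, size=100)
    y[:10] += 30.0  # inject a few gross outliers
    ransac = RANSACRegressor(random_state=0)
    ransac.fit(X, y)
    # The final model is refit on the consensus set flagged by inlier_mask_.
    return ransac.estimator_.coef_, ransac.inlier_mask_.sum()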
| bsd-3-clause |
billy-inn/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# License: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module (n_jobs=1 here; raise n_jobs to use more CPUs)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
laszlocsomor/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 3 | 31357 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
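# Illustrative sketch (added here, not part of TensorFlow): for x of shape
# (1000, 28, 28), y of shape (1000,), n_classes=10 and batch_size=32 the helper
# above yields input_shape [32, 28, 28], output_shape [32, 10] (the labels get
# a one-hot dimension because n_classes > 1) and batch_size 32.
def _demo_get_in_out_shape():  # hypothetical helper, for illustration only
  return _get_in_out_shape((1000, 28, 28), (1000,), 10, batch_size=32)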
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. If `y` is
      `dict` (or an iterable which returns dicts), `n_classes[key]` is the
      number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
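# Note (added for clarity, not part of TensorFlow): in-memory numpy/pandas
# inputs end up in DataFeeder, dask collections in DaskDataFeeder, and
# generator/iterator inputs in StreamingDataFeeder; mixing an iterator for `x`
# with an in-memory `y` (or vice versa) raises the ValueError above.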
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
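# Illustrative sketch (editor's addition): splitting an in-memory array into
# fixed-size prediction batches. The shape below is hypothetical.
def _example_setup_predict_data_feeder():
  """Returns a list of four 25-row batches (illustration only)."""
  x = np.random.rand(100, 3)
  return setup_predict_data_feeder(x, batch_size=25)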
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
    data: array-like. The collection to access.
    iloc: `int` or `list` of `int`s. Location(s) to access in `data`.
  Returns:
    The element(s) of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample, which can either be an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (
_check_dtype(self._y.dtype) if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if x_is_dict:
num_samples = list(self._x.values())[0].shape[0]
elif tensor_util.is_tensor(self._x):
num_samples = self._x.shape[
0].value # shape will be a Dimension, extract an int
else:
num_samples = self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
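# Illustrative sketch (editor's addition): the usual DataFeeder flow is to
# construct the feeder, build placeholders, then pull feed dicts. The data
# below is hypothetical.
def _example_data_feeder_flow():
  """Creates a DataFeeder and samples a single feed dict (illustration only)."""
  x = np.random.rand(10, 2).astype(np.float32)
  y = np.random.randint(0, 3, size=10)
  feeder = DataFeeder(x, y, n_classes=3, batch_size=4)
  input_ph, output_ph = feeder.input_builder()  # must precede get_feed_dict_fn
  feed_dict_fn = feeder.get_feed_dict_fn()
  # Maps the placeholder names built above to a mini-batch of x and one-hot y.
  return feed_dict_fn()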
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  A streaming data feeder allows reading data as it comes in from disk or
  from another source. It is common to have these iterators rotate infinitely
  over the dataset, so that the trainer side can control how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
        a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
        class or regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        to `None`, the iterator is assumed to return already-batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
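# Illustrative sketch (editor's addition): StreamingDataFeeder consumes
# iterators that yield one sample at a time. The generators below are
# hypothetical.
def _example_streaming_data_feeder():
  """Creates a StreamingDataFeeder from toy generators (illustration only)."""
  def x_iter():
    while True:
      yield np.random.rand(3).astype(np.float32)
  def y_iter():
    while True:
      yield np.array(np.random.randint(0, 2))
  # As with DataFeeder, input_builder() and get_feed_dict_fn() are then used
  # to feed the graph.
  return StreamingDataFeeder(x_iter(), y_iter(), n_classes=2, batch_size=8)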
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
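# Illustrative sketch (editor's addition): DaskDataFeeder expects dask
# DataFrames for both x and y. The toy frames below are hypothetical and use
# default integer column names, which exercises the renaming branch above.
def _example_dask_data_feeder():
  """Creates a DaskDataFeeder from small dask DataFrames (illustration only)."""
  import pandas as pd  # pylint: disable=g-import-not-at-top
  import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
  x = dd.from_pandas(pd.DataFrame([[0.1], [0.2], [0.3], [0.4]]), npartitions=1)
  y = dd.from_pandas(pd.DataFrame([[0], [1], [0], [1]]), npartitions=1)
  return DaskDataFeeder(x, y, n_classes=2, batch_size=2)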
| apache-2.0 |
shibanis1/spark-tk | integration-tests/tests/test_frame_pandas.py | 1 | 1212 | from setup import tc, rm, get_sandbox_path
from sparktk import dtypes
def test_frame_to_pandas_to_frame(tc):
"""
Tests going from a frame to a pandas df (download) and then back to a frame (import_pandas)
"""
# Create a frame from a csv file for testing
path = "../datasets/importcsvtest.csv"
frame1 = tc.frame.import_csv(path, header=True, inferschema=True)
# download to data frame and check the columns/types/row count
df = frame1.download()
assert(df.columns.tolist() == ['string_column', 'integer_column', 'float_column', 'datetime_column'])
assert([str(d) for d in df.dtypes] == ['object', 'int32', 'float64', 'datetime64[ns]'])
assert(frame1.row_count == len(df))
# import the data frame back to a frame
frame2 = tc.frame.import_pandas(df, frame1.schema, validate_schema=True)
# compare this frame to the original frame
assert(len(frame1.schema) == len(frame2.schema))
for col1, col2 in zip(frame1.schema, frame2.schema):
assert(col1[0] == col2[0])
assert(dtypes.dtypes.get_from_type(col1[1]) == dtypes.dtypes.get_from_type(col2[1]))
assert(frame2.take(frame2.row_count).data == frame1.take(frame1.row_count).data)
| apache-2.0 |
UNR-AERIAL/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
BobbyKim/plotting-scripts | plotline.py | 1 | 3237 | '''
Generates line plots (up to 5 lines).
'''
import sys
import argparse
import numpy as np
from numpy.random import uniform
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
parser = argparse.ArgumentParser(description='Plots pmf data.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("filename", help="input filename")
parser.add_argument("outname", help="output filename")
parser.add_argument("-dpi", default=150, type=int, help="figure dpi")
parser.add_argument("-x", default=0, type=int, help="x column number in f")
parser.add_argument("-xmin", type=float, help="x axis lower limit")
parser.add_argument("-xmax", type=float, help="x axis upper limit")
parser.add_argument("-y1", default=1, type=int, help="y1 column number in f")
parser.add_argument("-ymin", type=float, help="y axis lower limit")
parser.add_argument("-ymax", type=float, help="y axis upper limit")
parser.add_argument("-y2", type=int, help="y2 column number in f")
parser.add_argument("-y3", type=int, help="y3 column number in f")
parser.add_argument("-y4", type=int, help="y4 column number in f")
parser.add_argument("-y5", type=int, help="y5 column number in f")
parser.add_argument("-y1label", default='', help="y1 legend label")
parser.add_argument("-y2label", default='', help="y2 legend label")
parser.add_argument("-y3label", default='', help="y3 legend label")
parser.add_argument("-y4label", default='', help="y4 legend label")
parser.add_argument("-y5label", default='', help="y5 legend label")
parser.add_argument("-title", default='', help="title")
parser.add_argument("-xlabel", default='', help="xlabel")
parser.add_argument("-ylabel", default='', help="ylabel")
parser.add_argument("-axisfontsize", default=18, type=float, help="font size of xlabel, ylabel")
parser.add_argument("-titlefontsize", default=28, type=float, help="font size of title")
args = parser.parse_args()
mpl.rcParams.update({'font.size': args.axisfontsize})
data = np.loadtxt(args.filename)
plt.hold(True)
L = []
ylabels = []
P1, = plt.plot(data[:,args.x], data[:,args.y1], linewidth=2, color = "#008fd5") # blue
L.append(P1)
ylabels.append(args.y1label)
if args.y2 is not None:
P2, = plt.plot(data[:,args.x], data[:,args.y2], linewidth=2, color = "#77ab43") # green
L.append(P2)
ylabels.append(args.y2label)
if args.y3 is not None:
P3, = plt.plot(data[:,args.x], data[:,args.y3], linewidth=2, color = "#ff2700") # red
L.append(P3)
ylabels.append(args.y3label)
if args.y4 is not None:
P4, = plt.plot(data[:,args.x], data[:,args.y4], linewidth=2, color = "#f6b900") # gold
L.append(P4)
ylabels.append(args.y4label)
if args.y5 is not None:
P5, = plt.plot(data[:,args.x], data[:,args.y5], linewidth=2, color = "#4c0099") # test
L.append(P5)
ylabels.append(args.y5label)
# Only set axis limits that were actually provided on the command line.
if args.xmin is not None or args.xmax is not None:
    plt.xlim(args.xmin, args.xmax)
if args.ymin is not None or args.ymax is not None:
    plt.ylim(args.ymin, args.ymax)
#if len(L) > 1:
# plt.legend(L, ylabels)
plt.xlabel(args.xlabel)
plt.ylabel(args.ylabel)
plt.title(args.title, y=1.02, fontsize = args.titlefontsize)
plt.tight_layout()
#plt.axes().set_aspect('equal')
plt.savefig(args.outname, dpi=args.dpi, bbox_inches='tight')
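# Example invocation (illustrative; file names and column numbers below are
# hypothetical):
#   python plotline.py data.txt out.png -x 0 -y1 1 -y2 2 \
#       -xlabel "time (ns)" -ylabel "RMSD" -title "Trajectory"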
| gpl-2.0 |
joyxu/autotest | frontend/tko/graphing_utils.py | 2 | 32598 | import base64
import tempfile
import pickle
import datetime
import django
import os.path
import getpass
from math import sqrt
# When you import matplotlib, it tries to write some temp files for better
# performance, and it does that to the directory in MPLCONFIGDIR, or, if that
# doesn't exist, the home directory. Problem is, the home directory is not
# writable when running under Apache, and matplotlib's not smart enough to
# handle that. It does appear smart enough to handle the files going
# away after they are written, though.
temp_dir = os.path.join(tempfile.gettempdir(),
'.matplotlib-%s' % getpass.getuser())
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
os.environ['MPLCONFIGDIR'] = temp_dir
import matplotlib
matplotlib.use('Agg')
import matplotlib.figure
import matplotlib.backends.backend_agg
import StringIO
import colorsys
import PIL.Image
import PIL.ImageChops
from autotest.frontend.afe import readonly_connection
from autotest.frontend.afe.model_logic import ValidationError
from simplejson import encoder
from autotest.client.shared import settings
from autotest.frontend.tko import models, tko_rpc_utils
_FIGURE_DPI = 100
_FIGURE_WIDTH_IN = 10
_FIGURE_BOTTOM_PADDING_IN = 2 # for x-axis labels
_SINGLE_PLOT_HEIGHT = 6
_MULTIPLE_PLOT_HEIGHT_PER_PLOT = 4
_MULTIPLE_PLOT_MARKER_TYPE = 'o'
_MULTIPLE_PLOT_MARKER_SIZE = 4
_SINGLE_PLOT_STYLE = 'bs-' # blue squares with lines connecting
_SINGLE_PLOT_ERROR_BAR_COLOR = 'r'
_LEGEND_FONT_SIZE = 'xx-small'
_LEGEND_HANDLE_LENGTH = 0.03
_LEGEND_NUM_POINTS = 3
_LEGEND_MARKER_TYPE = 'o'
_LINE_XTICK_LABELS_SIZE = 'x-small'
_BAR_XTICK_LABELS_SIZE = 8
_json_encoder = encoder.JSONEncoder()
class NoDataError(Exception):
"""
Exception to raise if the graphing query returned an empty resultset.
"""
def _colors(n):
"""
    Generator function for creating n colors. Each yielded value is a tuple
    representing the RGB of a color.
"""
for i in xrange(n):
yield colorsys.hsv_to_rgb(float(i) / n, 1.0, 1.0)
def _resort(kernel_labels, list_to_sort):
"""
Resorts a list, using a list of kernel strings as the keys. Returns the
resorted list.
"""
labels = [tko_rpc_utils.KernelString(label) for label in kernel_labels]
resorted_pairs = sorted(zip(labels, list_to_sort))
# We only want the resorted list; we are not interested in the kernel
# strings.
return [pair[1] for pair in resorted_pairs]
def _quote(string):
return "%s%s%s" % ("'", string.replace("'", r"\'"), "'")
_HTML_TEMPLATE = """
<html><head></head><body>
<img src="data:image/png;base64,%s" usemap="#%s"
border="0" alt="graph">
<map name="%s">%s</map>
</body></html>"""
_AREA_TEMPLATE = """
<area shape="rect" coords="%i,%i,%i,%i" title="%s"
href="#"
onclick="%s(%s); return false;">"""
class MetricsPlot(object):
def __init__(self, query_dict, plot_type, inverted_series, normalize_to,
drilldown_callback):
"""
query_dict: dictionary containing the main query and the drilldown
queries. The main query returns a row for each x value. The first
column contains the x-axis label. Subsequent columns contain data
for each series, named by the column names. A column named
'errors-<x>' will be interpreted as errors for the series named <x>.
plot_type: 'Line' or 'Bar', depending on the plot type the user wants
inverted_series: list of series that should be plotted on an inverted
y-axis
normalize_to:
None - do not normalize
'first' - normalize against the first data point
'x__%s' - normalize against the x-axis value %s
'series__%s' - normalize against the series %s
drilldown_callback: name of drilldown callback method.
"""
self.query_dict = query_dict
if plot_type == 'Line':
self.is_line = True
elif plot_type == 'Bar':
self.is_line = False
else:
raise ValidationError({'plot': 'Plot must be either Line or Bar'})
self.plot_type = plot_type
self.inverted_series = inverted_series
self.normalize_to = normalize_to
if self.normalize_to is None:
self.normalize_to = ''
self.drilldown_callback = drilldown_callback
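    # Illustrative sketch (editor's addition): a hypothetical query_dict for a
    # two-series plot. '__main__' supplies the plotted data (first column is
    # the x-axis label); each '__<series>__' entry is that series' drilldown
    # query. The series names and SQL below are made up.
    #
    #   query_dict = {
    #       '__main__': 'SELECT kernel, dbench, tbench FROM perf_view ...',
    #       '__dbench__': 'SELECT ... WHERE benchmark = "dbench" ...',
    #       '__tbench__': 'SELECT ... WHERE benchmark = "tbench" ...',
    #   }
    #   plot_info = MetricsPlot(query_dict, 'Line', [], 'first',
    #                           'metricsDrilldown')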
class QualificationHistogram(object):
def __init__(self, query, filter_string, interval, drilldown_callback):
"""
query: the main query to retrieve the pass rate information. The first
column contains the hostnames of all the machines that satisfied the
global filter. The second column (titled 'total') contains the total
number of tests that ran on that machine and satisfied the global
filter. The third column (titled 'good') contains the number of
those tests that passed on that machine.
filter_string: filter to apply to the common global filter to show the
Table View drilldown of a histogram bucket
interval: interval for each bucket. E.g., 10 means that buckets should
be 0-10%, 10%-20%, ...
"""
self.query = query
self.filter_string = filter_string
self.interval = interval
self.drilldown_callback = drilldown_callback
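    # Illustrative sketch (editor's addition): a hypothetical construction.
    # The query must return (hostname, total, good) rows as described above;
    # the SQL and callback name below are made up.
    #
    #   hist = QualificationHistogram(
    #       query='SELECT hostname, COUNT(*) AS total, '
    #             'SUM(status = "GOOD") AS good FROM test_view '
    #             'GROUP BY hostname',
    #       filter_string='test_name = "dbench"',
    #       interval=10,
    #       drilldown_callback='qualDrilldown')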
def _create_figure(height_inches):
"""
Creates an instance of matplotlib.figure.Figure, given the height in inches.
Returns the figure and the height in pixels.
"""
fig = matplotlib.figure.Figure(
figsize=(_FIGURE_WIDTH_IN, height_inches + _FIGURE_BOTTOM_PADDING_IN),
dpi=_FIGURE_DPI, facecolor='white')
fig.subplots_adjust(bottom=float(_FIGURE_BOTTOM_PADDING_IN) / height_inches)
return (fig, fig.get_figheight() * _FIGURE_DPI)
def _create_line(plots, labels, plot_info):
"""
Given all the data for the metrics, create a line plot.
plots: list of dicts containing the plot data. Each dict contains:
x: list of x-values for the plot
y: list of corresponding y-values
errors: errors for each data point, or None if no error information
available
label: plot title
labels: list of x-tick labels
plot_info: a MetricsPlot
"""
# when we're doing any kind of normalization, all series get put into a
# single plot
single = bool(plot_info.normalize_to)
area_data = []
lines = []
if single:
plot_height = _SINGLE_PLOT_HEIGHT
else:
plot_height = _MULTIPLE_PLOT_HEIGHT_PER_PLOT * len(plots)
figure, height = _create_figure(plot_height)
if single:
subplot = figure.add_subplot(1, 1, 1)
# Plot all the data
for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):
needs_invert = (plot['label'] in plot_info.inverted_series)
# Add a new subplot, if user wants multiple subplots
# Also handle axis inversion for subplots here
if not single:
subplot = figure.add_subplot(len(plots), 1, plot_index + 1)
subplot.set_title(plot['label'])
if needs_invert:
# for separate plots, just invert the y-axis
subplot.set_ylim(1, 0)
elif needs_invert:
# for a shared plot (normalized data), need to invert the y values
# manually, since all plots share a y-axis
plot['y'] = [-y for y in plot['y']]
# Plot the series
subplot.set_xticks(range(0, len(labels)))
subplot.set_xlim(-1, len(labels))
if single:
lines += subplot.plot(plot['x'], plot['y'], label=plot['label'],
marker=_MULTIPLE_PLOT_MARKER_TYPE,
markersize=_MULTIPLE_PLOT_MARKER_SIZE)
error_bar_color = lines[-1].get_color()
else:
lines += subplot.plot(plot['x'], plot['y'], _SINGLE_PLOT_STYLE,
label=plot['label'])
error_bar_color = _SINGLE_PLOT_ERROR_BAR_COLOR
if plot['errors']:
subplot.errorbar(plot['x'], plot['y'], linestyle='None',
yerr=plot['errors'], color=error_bar_color)
subplot.set_xticklabels([])
# Construct the information for the drilldowns.
# We need to do this in a separate loop so that all the data is in
# matplotlib before we start calling transform(); otherwise, it will return
# incorrect data because it hasn't finished adjusting axis limits.
for line in lines:
# Get the pixel coordinates of each point on the figure
x = line.get_xdata()
y = line.get_ydata()
label = line.get_label()
icoords = line.get_transform().transform(zip(x, y))
# Get the appropriate drilldown query
drill = plot_info.query_dict['__' + label + '__']
# Set the title attributes (hover-over tool-tips)
x_labels = [labels[x_val] for x_val in x]
titles = ['%s - %s: %f' % (label, x_label, y_val)
for x_label, y_val in zip(x_labels, y)]
# Get the appropriate parameters for the drilldown query
params = [dict(query=drill, series=line.get_label(), param=x_label)
for x_label in x_labels]
area_data += [dict(left=ix - 5, top=height - iy - 5,
right=ix + 5, bottom=height - iy + 5,
title=title,
callback=plot_info.drilldown_callback,
callback_arguments=param_dict)
for (ix, iy), title, param_dict
in zip(icoords, titles, params)]
subplot.set_xticklabels(labels, rotation=90, size=_LINE_XTICK_LABELS_SIZE)
# Show the legend if there are not multiple subplots
if single:
font_properties = matplotlib.font_manager.FontProperties(
size=_LEGEND_FONT_SIZE)
legend = figure.legend(lines, [plot['label'] for plot in plots],
prop=font_properties,
handlelen=_LEGEND_HANDLE_LENGTH,
numpoints=_LEGEND_NUM_POINTS)
        # Workaround: without explicitly setting the marker below, matplotlib
        # does not keep all the line markers in the legend.
for line in legend.get_lines():
line.set_marker(_LEGEND_MARKER_TYPE)
return (figure, area_data)
def _get_adjusted_bar(x, bar_width, series_index, num_plots):
"""
Adjust the list 'x' to take the multiple series into account. Each series
should be shifted such that the middle series lies at the appropriate x-axis
tick with the other bars around it. For example, if we had four series
(i.e. four bars per x value), we want to shift the left edges of the bars as
such:
Bar 1: -2 * width
Bar 2: -width
Bar 3: none
Bar 4: width
"""
adjust = (-0.5 * num_plots - 1 + series_index) * bar_width
return [x_val + adjust for x_val in x]
# TODO(showard): merge much of this function with _create_line by extracting and
# parameterizing methods
def _create_bar(plots, labels, plot_info):
"""
    Given all the data for the metrics, create a bar plot.
plots: list of dicts containing the plot data.
x: list of x-values for the plot
y: list of corresponding y-values
errors: errors for each data point, or None if no error information
available
label: plot title
labels: list of x-tick labels
plot_info: a MetricsPlot
"""
area_data = []
bars = []
figure, height = _create_figure(_SINGLE_PLOT_HEIGHT)
# Set up the plot
subplot = figure.add_subplot(1, 1, 1)
subplot.set_xticks(range(0, len(labels)))
subplot.set_xlim(-1, len(labels))
subplot.set_xticklabels(labels, rotation=90, size=_BAR_XTICK_LABELS_SIZE)
# draw a bold line at y=0, making it easier to tell if bars are dipping
# below the axis or not.
subplot.axhline(linewidth=2, color='black')
# width here is the width for each bar in the plot. Matplotlib default is
# 0.8.
width = 0.8 / len(plots)
# Plot the data
for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):
# Invert the y-axis if needed
if plot['label'] in plot_info.inverted_series:
plot['y'] = [-y for y in plot['y']]
adjusted_x = _get_adjusted_bar(plot['x'], width, plot_index + 1,
len(plots))
bar_data = subplot.bar(adjusted_x, plot['y'],
width=width, yerr=plot['errors'],
facecolor=color,
label=plot['label'])
bars.append(bar_data[0])
# Construct the information for the drilldowns.
# See comment in _create_line for why we need a separate loop to do this.
for plot_index, plot in enumerate(plots):
adjusted_x = _get_adjusted_bar(plot['x'], width, plot_index + 1,
len(plots))
# Let matplotlib plot the data, so that we can get the data-to-image
# coordinate transforms
line = subplot.plot(adjusted_x, plot['y'], linestyle='None')[0]
label = plot['label']
upper_left_coords = line.get_transform().transform(zip(adjusted_x,
plot['y']))
bottom_right_coords = line.get_transform().transform(
[(x + width, 0) for x in adjusted_x])
# Get the drilldown query
drill = plot_info.query_dict['__' + label + '__']
# Set the title attributes
x_labels = [labels[x] for x in plot['x']]
titles = ['%s - %s: %f' % (plot['label'], label, y)
for label, y in zip(x_labels, plot['y'])]
params = [dict(query=drill, series=plot['label'], param=x_label)
for x_label in x_labels]
area_data += [dict(left=ulx, top=height - uly,
right=brx, bottom=height - bry,
title=title,
callback=plot_info.drilldown_callback,
callback_arguments=param_dict)
for (ulx, uly), (brx, bry), title, param_dict
in zip(upper_left_coords, bottom_right_coords, titles,
params)]
figure.legend(bars, [plot['label'] for plot in plots])
return (figure, area_data)
def _normalize(data_values, data_errors, base_values, base_errors):
"""
Normalize the data against a baseline.
data_values: y-values for the to-be-normalized data
data_errors: standard deviations for the to-be-normalized data
base_values: list of values normalize against
base_errors: list of standard deviations for those base values
"""
values = []
for value, base in zip(data_values, base_values):
try:
values.append(100 * (value - base) / base)
except ZeroDivisionError:
# Base is 0.0 so just simplify:
# If value < base: append -100.0;
# If value == base: append 0.0 (obvious); and
# If value > base: append 100.0.
values.append(100 * float(cmp(value, base)))
# Based on error for f(x,y) = 100 * (x - y) / y
if data_errors:
if not base_errors:
base_errors = [0] * len(data_errors)
errors = []
for data, error, base_value, base_error in zip(
data_values, data_errors, base_values, base_errors):
try:
errors.append(sqrt(error ** 2 * (100 / base_value) ** 2
+ base_error ** 2 * (100 * data / base_value ** 2) ** 2
+ error * base_error * (100 / base_value ** 2) ** 2))
except ZeroDivisionError:
# Again, base is 0.0 so do the simple thing.
errors.append(100 * abs(error))
else:
errors = None
return (values, errors)
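# Worked example (editor's addition): with data_values=[110.0, 90.0] and
# base_values=[100.0, 100.0] (no errors given), _normalize returns
# ([10.0, -10.0], None), i.e. +10% and -10% relative to the baseline.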
def _create_png(figure):
"""
Given the matplotlib figure, generate the PNG data for it.
"""
# Draw the image
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
canvas.draw()
size = canvas.get_renderer().get_canvas_width_height()
image_as_string = canvas.tostring_rgb()
image = PIL.Image.fromstring('RGB', size, image_as_string, 'raw', 'RGB', 0,
1)
image_background = PIL.Image.new(image.mode, image.size,
figure.get_facecolor())
# Crop the image to remove surrounding whitespace
non_whitespace = PIL.ImageChops.difference(image, image_background)
bounding_box = non_whitespace.getbbox()
image = image.crop(bounding_box)
image_data = StringIO.StringIO()
image.save(image_data, format='PNG')
return image_data.getvalue(), bounding_box
def _create_image_html(figure, area_data, plot_info):
"""
Given the figure and drilldown data, construct the HTML that will render the
graph as a PNG image, and attach the image map to that image.
figure: figure containing the drawn plot(s)
area_data: list of parameters for each area of the image map. See the
definition of the template string '_AREA_TEMPLATE'
plot_info: a MetricsPlot or QualHistogram
"""
png, bbox = _create_png(figure)
# Construct the list of image map areas
areas = [_AREA_TEMPLATE %
(data['left'] - bbox[0], data['top'] - bbox[1],
data['right'] - bbox[0], data['bottom'] - bbox[1],
data['title'], data['callback'],
_json_encoder.encode(data['callback_arguments'])
.replace('"', '"'))
for data in area_data]
map_name = plot_info.drilldown_callback + '_map'
return _HTML_TEMPLATE % (base64.b64encode(png), map_name, map_name,
'\n'.join(areas))
def _find_plot_by_label(plots, label):
for index, plot in enumerate(plots):
if plot['label'] == label:
return index
raise ValueError('no plot labeled "%s" found' % label)
def _normalize_to_series(plots, base_series):
base_series_index = _find_plot_by_label(plots, base_series)
base_plot = plots[base_series_index]
base_xs = base_plot['x']
base_values = base_plot['y']
base_errors = base_plot['errors']
del plots[base_series_index]
for plot in plots:
old_xs, old_values, old_errors = plot['x'], plot['y'], plot['errors']
new_xs, new_values, new_errors = [], [], []
new_base_values, new_base_errors = [], []
# Select only points in the to-be-normalized data that have a
# corresponding baseline value
for index, x_value in enumerate(old_xs):
try:
base_index = base_xs.index(x_value)
except ValueError:
continue
new_xs.append(x_value)
new_values.append(old_values[index])
new_base_values.append(base_values[base_index])
if old_errors:
new_errors.append(old_errors[index])
new_base_errors.append(base_errors[base_index])
if not new_xs:
raise NoDataError('No normalizable data for series ' +
plot['label'])
plot['x'] = new_xs
plot['y'] = new_values
if old_errors:
plot['errors'] = new_errors
plot['y'], plot['errors'] = _normalize(plot['y'], plot['errors'],
new_base_values,
new_base_errors)
def _create_metrics_plot_helper(plot_info, extra_text=None):
"""
Create a metrics plot of the given plot data.
plot_info: a MetricsPlot object.
extra_text: text to show at the uppper-left of the graph
TODO(showard): move some/all of this logic into methods on MetricsPlot
"""
query = plot_info.query_dict['__main__']
cursor = readonly_connection.connection().cursor()
cursor.execute(query)
if not cursor.rowcount:
raise NoDataError('query did not return any data')
rows = cursor.fetchall()
# "transpose" rows, so columns[0] is all the values from the first column,
# etc.
columns = zip(*rows)
plots = []
labels = [str(label) for label in columns[0]]
needs_resort = (cursor.description[0][0] == 'kernel')
# Collect all the data for the plot
col = 1
while col < len(cursor.description):
y = columns[col]
label = cursor.description[col][0]
col += 1
if (col < len(cursor.description) and
'errors-' + label == cursor.description[col][0]):
errors = columns[col]
col += 1
else:
errors = None
if needs_resort:
y = _resort(labels, y)
if errors:
errors = _resort(labels, errors)
x = [index for index, value in enumerate(y) if value is not None]
if not x:
raise NoDataError('No data for series ' + label)
y = [y[i] for i in x]
if errors:
errors = [errors[i] for i in x]
plots.append({
'label': label,
'x': x,
'y': y,
'errors': errors
})
if needs_resort:
labels = _resort(labels, labels)
# Normalize the data if necessary
normalize_to = plot_info.normalize_to
if normalize_to == 'first' or normalize_to.startswith('x__'):
if normalize_to != 'first':
baseline = normalize_to[3:]
try:
baseline_index = labels.index(baseline)
except ValueError:
raise ValidationError({
'Normalize': 'Invalid baseline %s' % baseline
})
for plot in plots:
if normalize_to == 'first':
plot_index = 0
else:
try:
plot_index = plot['x'].index(baseline_index)
# if the value is not found, then we cannot normalize
except ValueError:
raise ValidationError({
'Normalize': ('%s does not have a value for %s'
% (plot['label'], normalize_to[3:]))
})
            base_values = [plot['y'][plot_index]] * len(plot['y'])
            base_errors = None
            if plot['errors']:
                base_errors = [plot['errors'][plot_index]] * len(plot['errors'])
            plot['y'], plot['errors'] = _normalize(plot['y'], plot['errors'],
                                                   base_values,
                                                   base_errors)
elif normalize_to.startswith('series__'):
base_series = normalize_to[8:]
_normalize_to_series(plots, base_series)
# Call the appropriate function to draw the line or bar plot
if plot_info.is_line:
figure, area_data = _create_line(plots, labels, plot_info)
else:
figure, area_data = _create_bar(plots, labels, plot_info)
# TODO(showard): extract these magic numbers to named constants
if extra_text:
text_y = .95 - .0075 * len(plots)
figure.text(.1, text_y, extra_text, size='xx-small')
return (figure, area_data)
def create_metrics_plot(query_dict, plot_type, inverted_series, normalize_to,
drilldown_callback, extra_text=None):
plot_info = MetricsPlot(query_dict, plot_type, inverted_series,
normalize_to, drilldown_callback)
figure, area_data = _create_metrics_plot_helper(plot_info, extra_text)
return _create_image_html(figure, area_data, plot_info)
def _get_hostnames_in_bucket(hist_data, bucket):
"""
Get all the hostnames that constitute a particular bucket in the histogram.
hist_data: list containing tuples of (hostname, pass_rate)
bucket: tuple containing the (low, high) values of the target bucket
"""
return [hostname for hostname, pass_rate in hist_data
if bucket[0] <= pass_rate < bucket[1]]
def _create_qual_histogram_helper(plot_info, extra_text=None):
"""
Create a machine qualification histogram of the given data.
plot_info: a QualificationHistogram
extra_text: text to show at the upper-left of the graph
TODO(showard): move much or all of this into methods on
QualificationHistogram
"""
cursor = readonly_connection.connection().cursor()
cursor.execute(plot_info.query)
if not cursor.rowcount:
raise NoDataError('query did not return any data')
# Lists to store the plot data.
# hist_data store tuples of (hostname, pass_rate) for machines that have
# pass rates between 0 and 100%, exclusive.
# no_tests is a list of machines that have run none of the selected tests
# no_pass is a list of machines with 0% pass rate
# perfect is a list of machines with a 100% pass rate
hist_data = []
no_tests = []
no_pass = []
perfect = []
# Construct the lists of data to plot
for hostname, total, good in cursor.fetchall():
if total == 0:
no_tests.append(hostname)
continue
if good == 0:
no_pass.append(hostname)
elif good == total:
perfect.append(hostname)
else:
percentage = 100.0 * good / total
hist_data.append((hostname, percentage))
interval = plot_info.interval
bins = range(0, 100, interval)
if bins[-1] != 100:
bins.append(bins[-1] + interval)
figure, height = _create_figure(_SINGLE_PLOT_HEIGHT)
subplot = figure.add_subplot(1, 1, 1)
# Plot the data and get all the bars plotted
_, _, bars = subplot.hist([data[1] for data in hist_data],
bins=bins, align='left')
bars += subplot.bar([-interval], len(no_pass),
width=interval, align='center')
bars += subplot.bar([bins[-1]], len(perfect),
width=interval, align='center')
bars += subplot.bar([-3 * interval], len(no_tests),
width=interval, align='center')
buckets = [(bin, min(bin + interval, 100)) for bin in bins[:-1]]
# set the x-axis range to cover all the normal bins plus the three "special"
    # ones - N/A (3 intervals left), 0% (1 interval left), and 100% (far right)
subplot.set_xlim(-4 * interval, bins[-1] + interval)
subplot.set_xticks([-3 * interval, -interval] + bins + [100 + interval])
subplot.set_xticklabels(['N/A', '0%'] +
['%d%% - <%d%%' % bucket for bucket in buckets] +
['100%'], rotation=90, size='small')
# Find the coordinates on the image for each bar
x = []
y = []
for bar in bars:
x.append(bar.get_x())
y.append(bar.get_height())
f = subplot.plot(x, y, linestyle='None')[0]
upper_left_coords = f.get_transform().transform(zip(x, y))
bottom_right_coords = f.get_transform().transform(
[(x_val + interval, 0) for x_val in x])
# Set the title attributes
titles = ['%d%% - <%d%%: %d machines' % (bucket[0], bucket[1], y_val)
for bucket, y_val in zip(buckets, y)]
titles.append('0%%: %d machines' % len(no_pass))
titles.append('100%%: %d machines' % len(perfect))
titles.append('N/A: %d machines' % len(no_tests))
# Get the hostnames for each bucket in the histogram
names_list = [_get_hostnames_in_bucket(hist_data, bucket)
for bucket in buckets]
names_list += [no_pass, perfect]
if plot_info.filter_string:
plot_info.filter_string += ' AND '
# Construct the list of drilldown parameters to be passed when the user
# clicks on the bar.
params = []
for names in names_list:
if names:
hostnames = ','.join(_quote(hostname) for hostname in names)
hostname_filter = 'hostname IN (%s)' % hostnames
full_filter = plot_info.filter_string + hostname_filter
params.append({'type': 'normal',
'filterString': full_filter})
else:
params.append({'type': 'empty'})
params.append({'type': 'not_applicable',
'hosts': '<br />'.join(no_tests)})
area_data = [dict(left=ulx, top=height - uly,
right=brx, bottom=height - bry,
title=title, callback=plot_info.drilldown_callback,
callback_arguments=param_dict)
for (ulx, uly), (brx, bry), title, param_dict
in zip(upper_left_coords, bottom_right_coords, titles, params)]
# TODO(showard): extract these magic numbers to named constants
if extra_text:
figure.text(.1, .95, extra_text, size='xx-small')
return (figure, area_data)
def create_qual_histogram(query, filter_string, interval, drilldown_callback,
extra_text=None):
plot_info = QualificationHistogram(query, filter_string, interval,
drilldown_callback)
figure, area_data = _create_qual_histogram_helper(plot_info, extra_text)
return _create_image_html(figure, area_data, plot_info)
def create_embedded_plot(model, update_time):
"""
Given an EmbeddedGraphingQuery object, generate the PNG image for it.
model: EmbeddedGraphingQuery object
update_time: 'Last updated' time
"""
params = pickle.loads(model.params)
extra_text = 'Last updated: %s' % update_time
if model.graph_type == 'metrics':
plot_info = MetricsPlot(query_dict=params['queries'],
plot_type=params['plot'],
inverted_series=params['invert'],
normalize_to=None,
drilldown_callback='')
figure, areas_unused = _create_metrics_plot_helper(plot_info,
extra_text)
elif model.graph_type == 'qual':
plot_info = QualificationHistogram(
query=params['query'], filter_string=params['filter_string'],
interval=params['interval'], drilldown_callback='')
figure, areas_unused = _create_qual_histogram_helper(plot_info,
extra_text)
else:
raise ValueError('Invalid graph_type %s' % model.graph_type)
image, bounding_box_unused = _create_png(figure)
return image
_cache_timeout = settings.settings.get_value('AUTOTEST_WEB',
'graph_cache_creation_timeout_minutes')
def handle_plot_request(id, max_age):
"""
Given the embedding id of a graph, generate a PNG of the embedded graph
associated with that id.
id: id of the embedded graph
max_age: maximum age, in minutes, that a cached version should be held
"""
model = models.EmbeddedGraphingQuery.objects.get(id=id)
# Check if the cached image needs to be updated
now = datetime.datetime.now()
update_time = model.last_updated + datetime.timedelta(minutes=int(max_age))
if now > update_time:
cursor = django.db.connection.cursor()
# We want this query to update the refresh_time only once, even if
# multiple threads are running it at the same time. That is, only the
# first thread will win the race, and it will be the one to update the
# cached image; all other threads will show that they updated 0 rows
query = """
UPDATE embedded_graphing_queries
SET refresh_time = NOW()
WHERE id = %s AND (
refresh_time IS NULL OR
refresh_time + INTERVAL %s MINUTE < NOW()
)
"""
cursor.execute(query, (id, _cache_timeout))
# Only refresh the cached image if we were successful in updating the
# refresh time
if cursor.rowcount:
model.cached_png = create_embedded_plot(model, now.ctime())
model.last_updated = now
model.refresh_time = None
model.save()
return model.cached_png
| gpl-2.0 |
mfouesneau/tap | examples/ipython_notebook.py | 2 | 26219 | """
Some tools for the notebooks
"""
from IPython.display import display, Markdown
try:
from nbconvert.filters.markdown import markdown2latex, markdown2html
except ImportError:
from IPython.nbconvert.filters.markdown import markdown2latex, markdown2html
from IPython.display import DisplayObject
import time as _time
import sys
class Caption(Markdown):
""" Make a caption to associate with figures """
def __init__(self, s, center=False, **kwargs):
Markdown.__init__(self, s, **kwargs)
self._center = center
def _repr_html_(self):
txt = markdown2html(self.data)
if self._center:
return '<center>{0}</center>'.format(txt)
else:
return '{0}'.format(txt)
def _repr_latex_(self):
txt = markdown2latex(self.data)
if self._center:
return '\\begin{center}\n' + txt + '\n\\end{center}'
else:
return txt
def display(self):
display(self)
def __str__(self):
return self._repr_latex_()
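# Illustrative usage (editor's addition): in a notebook cell, a Caption can be
# displayed beneath a figure; the text below is arbitrary.
#
#   Caption('**Figure 1:** posterior distribution of the means',
#           center=True).display()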
class Matrix(object):
""" Make a caption to associate with figures """
def __init__(self,s, fmt='%0.4g'):
self.s = s
self._fmt = fmt
def _repr_(self):
text = r"""\begin{bmatrix}"""
t = []
for k in self.s:
t.append( ' & '.join([self._fmt % v for v in k] ) + r'\\' )
text += ''.join(t)
text += r"""\end{bmatrix}"""
return Markdown(text)
def _repr_latex_(self):
text = r"""\begin{bmatrix}"""
t = []
for k in self.s:
t.append( ' & '.join([self._fmt % v for v in k] ) + r'\\' )
text += ''.join(t)
text += r"""\end{bmatrix}"""
return text
def __str__(self):
return self._repr_latex_()
def display(self):
display(self)
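# Illustrative usage (editor's addition): rendering a small 2x2 array as a
# LaTeX bmatrix; the values below are arbitrary.
#
#   Matrix([[1.0, 0.5], [0.5, 2.0]]).display()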
def disp_markdown(*args):
return display(Markdown(*args))
def load_latex_macros():
return disp_markdown(open('notebook_macros').read())
def add_input_toggle():
from IPython.display import HTML, display
r = HTML('''
<script>
$( document ).ready(function () {
IPython.CodeCell.options_default['cm_config']['lineWrapping'] = true;
IPython.notebook.get_selected_cell()
IPython.toolbar.add_buttons_group([
{
'label' : 'toggle all input cells',
'icon' : 'fa-eye-slash',
'callback': function(){ $('div.input').slideToggle(); }
}
]);
});
</script>
''')
display(r)
return r
def add_citation_button():
from IPython.display import HTML, display
r = HTML("""
<script>
function insert_citn() {
// Build paragraphs of cell type and count
var entry_box = $('<input type="text"/>');
var body = $('<div><p> Enter the Bibtex reference to insert </p><form>').append(entry_box)
.append('</form></div>');
// Show a modal dialog with the stats
IPython.dialog.modal({
notebook: IPython.notebook,
keyboard_manager: IPython.notebook.keyboard_manager,
title: "Bibtex reference insertion",
body: body,
open: function() {
// Submit on pressing enter
var that = $(this);
that.find('form').submit(function () {
that.find('.btn-primary').first().click();
return false;
});
entry_box.focus();
},
buttons : {
"Cancel" : {},
"Insert" : {
"class" : "btn-primary",
"click" : function() {
// Retrieve the selected citation, add to metadata,
var citation = entry_box.val();
// if (!citation) {return;}
var citn_html = '<cite data-cite="' + citation + '">' + citation + '</cite>';
var cell = IPython.notebook.get_selected_cell();
cell.code_mirror.replaceSelection(citn_html);
}
}
}
});
};
$( document ).ready(function () {
IPython.toolbar.add_buttons_group([
{
'label' : 'insert bibtex reference in markdown',
'icon' : 'fa-graduation-cap', // http://fontawesome.io/icons/
'callback': insert_citn,
}
]);
});
</script>
<style>
cite {
font-style: normal;
color: #45749e;
}
</style>
""")
display(r)
return r
class PDF(object):
def __init__(self,url):
self.url = url
def _repr_html_(self):
return '<iframe src=%s></iframe>' % self.url
def _repr_latex_(self):
return r'\begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{%s}\end{center}' % self.url
class Table(DisplayObject):
VDOTS = object()
def __init__(self, data, headings=None, formats=None, caption=None,
label=None, position='h', subtables=1):
"""
A HTML/LaTeX IPython DisplayObject Table
`data` should be a 2 dimensional array, indexed by row then column,
with an optional extra row `headings`.
A 'row' (i.e., an element of `data`) may also be
:py:const:`Table.VDOTS`, which produces vertical dots in all columns.
`formats` may be a string, whose format method will be used for every
cell; a function, called for every cell; or a mixed array of strings
and functions which is zipped with each row.
Headings are not formatted.
`caption` and `label` add the relevant LaTeX markup, and will go in
the first row of the HTML copy. `label` will have ``tab:`` prepended
to it.
If `subtables` is greater than 1, the table will be split into
`subtables` parts of approximately equal length, and laid out side
by side.
"""
if len(data) == 0:
raise ValueError("data is empty")
        if (label is None) != (caption is None):
            raise ValueError("specify neither or both of label & caption")
self.columns = len(data[0])
if self.columns == 0:
raise ValueError("no columns")
if headings and len(headings) != self.columns:
raise ValueError("bad headings length")
if isinstance(formats, str):
formats = [formats.format] * self.columns
elif callable(formats):
formats = [formats] * self.columns
elif formats:
if len(formats) != self.columns:
raise ValueError("bad formats length")
def maybe_string_format(f):
if isinstance(f, str):
return f.format
else:
assert callable(f)
return f
formats = list(map(maybe_string_format, formats))
else:
formats = [self._default_format] * self.columns
for i, row in enumerate(data):
if row is not self.VDOTS and len(row) != self.columns:
raise ValueError("bad row length", i)
self.headings = headings
self.data = data
self.formats = formats
self.caption = caption
self.label = label
self.position = position
self.subtables = subtables
@staticmethod
def _default_format(what):
if isinstance(what, float):
return "{0:.5f}".format(what)
else:
return str(what)
def _format_rows(self):
for row in self.data:
if row is self.VDOTS:
yield self.VDOTS
else:
yield (f(x) for f, x in zip(self.formats, row))
def _subtables_split(self):
assert self.subtables > 1
rows = list(self._format_rows())
nominal_height = len(rows) // self.subtables
remainder = len(rows) % self.subtables
heights = [nominal_height] * self.subtables
for i in range(remainder):
heights[i] += 1
slices = []
acc = 0
for l in heights:
slices.append((acc, acc + l))
acc += l
assert slices[-1][1] == len(rows)
subtables = [rows[a:b] for a, b in slices]
return subtables
def _repr_latex_(self):
strings = []
strings.append(r"""
\begin{table}[""" + self.position + r"""]
\centering
""")
if self.label:
strings.append(r"\caption{" + self.caption + "}")
strings.append(r"\label{tab:" + self.label + "}")
if self.subtables > 1:
subtables = self._subtables_split()
width = "{:.3f}\linewidth".format(0.95 / self.subtables)
for i, rows in enumerate(subtables):
strings.append(r"\begin{{subtable}}[t]{{{0}}}%".format(width))
strings.append(r"""
\centering
\vspace{0pt}
""")
self._latex_tabular(strings, rows)
strings.append(r"\end{subtable}%")
if i != len(subtables) - 1:
strings.append("\hfill%")
else:
rows = self._format_rows()
self._latex_tabular(strings, rows)
strings.append(r"""
\end{table}
""")
return "\n".join(strings)
def _latex_tabular(self, strings, rows):
x = "|".join(["c"] * self.columns)
strings.append(r"\begin{tabular}{|" + x + "|}")
strings.append(r"\hline")
if self.headings:
latex = " & ".join(str(x) for x in self.headings)
strings.append(latex + r" \\")
strings.append(r"\hline")
for row in rows:
if row is self.VDOTS:
row = [r"\vdots"] * self.columns
latex = " & ".join(row)
strings.append(latex + r" \\")
strings.append(r"""
\hline
\end{tabular}%""")
def _repr_html_(self):
strings = []
strings.append("""
<style type="text/css">
.util_Table td { text-align: center; }
.util_Table tbody tr, .util_Table tbody td {
border-bottom: 0;
border-top: 0;
}
.util_Table_subtable {
float: left;
}
</style>
""")
if self.label:
c = self.caption
l = "<code>[{}]</code>".format(self.label)
strings.append("""
<h3>{1} {2}</h3>
""".format(self.columns, c, l))
if self.subtables > 1:
subtables = self._subtables_split()
# width = 0.95 / self.subtables
strings.append("<div class='clearfix'>")
for rows in subtables:
strings.append("<div class='util_Table_subtable'>")
self._html_table(strings, rows)
strings.append("</div>")
strings.append("</div>")
else:
rows = self._format_rows()
self._html_table(strings, rows)
return "\n".join(strings)
def _html_table(self, strings, rows):
strings.append("<table class='util_Table'>")
if self.headings:
strings.append("<thead>")
strings.append("<tr>")
headings = map("<th>{0}</th>".format, self.headings)
strings.append("\n".join(headings))
strings.append("</tr>")
strings.append("</thead>")
strings.append("<tbody>")
for row in rows:
if row is self.VDOTS:
row = ["\u22ee"] * self.columns
strings.append("<tr>")
row = map("<td>{0}</td>".format, row)
strings.append("\n".join(row))
strings.append("</tr>")
strings.append("</tbody>")
strings.append("</table>")
def __repr__(self):
if self.headings:
widths = [len(x) for x in self.headings]
data = [self.headings]
else:
widths = None
data = []
# don't forget - self._format_rows() is a generator that yields generators
for row in self._format_rows():
if row is self.VDOTS:
continue
r = list(row)
w = [len(x) for x in r]
if widths is None:
widths = w
else:
widths = [max(a, b) for a, b in zip(widths, w)]
data.append(list(r))
strings = []
if self.label:
c = self.caption.replace("\n", " ")
strings.append('Table: {0} ({1})'.format(self.label, c))
for row in data:
if row is self.VDOTS:
strings.append('...')
else:
r = [x.ljust(b + 4) for x, b in zip(row, widths)]
strings.append(''.join(r))
return '\n'.join(strings)
def __html__(self):
return self._repr_html_()
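# A minimal usage sketch for Table (hypothetical data; assumes an IPython/Jupyter
# session so the HTML/LaTeX reprs above are picked up by the display machinery):
#
#   rows = [(1, 0.5), (2, 0.25), Table.VDOTS, (10, 0.0009765625)]
#   t = Table(rows, headings=('n', '2**-n'), formats='{0}',
#             caption='Powers of two', label='pow2', subtables=2)
#   t          # rich HTML/LaTeX display in the notebook
#   print(t)   # plain-text fallback via __repr__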
class LatexFigure(object):
extension = 'pdf'
def __init__(self, label, caption, fig=None, position="", star=False,
options='width=\columnwidth', margin=False):
"""
A LaTeX IPython DisplayObject Figure
`label` is mandatory, since it also sets the filename. It will
have ``fig:`` preprended to it.
`fig` is optional - the current figure (via ``gcf``) will be used
if it is not set.
`position` is either the float placement specifier or the subfigure
vertical position.
If `subfigure` is set to true, a subfigure with width `width` will
be created.
The figure is saved (via ``savefig``) as a PDF file in the current
directory.
Displaying the object produces LaTeX (only) to embed the figure.
A little hacky, but since this is meant for use in the notebook
it is assumed that the figure is going to be displayed automatically
in HTML independently.
"""
if fig is None:
from matplotlib.pyplot import gcf
fig = gcf()
self.label = label
self.caption = caption
self.fig = fig
self.position = position
self.options = options
self.star = star
self.margin = margin
self.filename = "figure_{0:s}.{1:s}".format(label, self.__class__.extension)
import pylab as plt
try:
plt.savefig(self.filename, bbox_inches='tight')
except:
plt.savefig(self.filename)
def _repr_html_(self):
# Bit crude. Hide ourselves to the notebook viewer, since we'll
# have been shown already anyway.
# Nicer solutions are afaict infeasible.
return markdown2html('> **Figure (<a name="fig:{label:s}">{label:s}</a>)**: {caption:s}'.format(
label=self.label, caption=self.caption))
def _repr_latex_(self, subfigure=None):
if subfigure:
environment = "subfigure"
args = "[{position}]{{{width}}}".format(**subfigure)
else:
environment = "figure"
args = "[{0}]".format(self.position)
args = args.replace('[]', '')
if self.star:
environment += '*'
        elif self.margin and (not subfigure):
environment = "margin" + environment
return r"""\begin{{{env:s}}}{args:s}
\centering
\includegraphics[{options:s}]{{{fname:s}}}
\caption{{{caption:s}}}
\label{{fig:{label:s}}}
\end{{{env:s}}}
""".format(env=environment, args=args, options=self.options,
fname=self.filename, caption=self.caption,
label=self.label)
def __repr__(self):
c = self.caption.replace("\n", " ")
return "Figure: {0} ({1})".format(self.label, c)
def __html__(self):
return ""
class LatexSubfigures(object):
def __init__(self, label, caption, figures, position='h',
subfigure_position='b', star=False):
"""
Displays several :cls:`LatexFigures` as sub-figures, two per row.
`figures` should be an array of :cls:`LatexFigure` objects, not
:cls:`matplotlib.Figure` objects.
"""
self.label = label
self.caption = caption
self.figures = figures
self.position = position
self.subfigure_position = subfigure_position
self.star = star
def _repr_html_(self):
# Bit crude. Hide ourselves to the notebook viewer, since we'll
# have been shown already anyway.
# Nicer solutions are afaict infeasible.
return markdown2html('> **Figure (<a name="fig:{label:s}">{label:s}</a>)**: {caption:s}'.format(
label=self.label, caption=self.caption))
def _repr_latex_(self):
strings = []
environment = "figure"
if self.star:
environment += '*'
strings.append(r"""\begin{""" + environment + """}[""" + self.position + r"""]
\centering
""")
#left = True
#first = True
opts = {"position": self.subfigure_position,
"width": "{0:0.2f}\linewidth".format((1 - len(self.figures) * 0.01) / len(self.figures))}
for f in self.figures:
#if left and not first:
# strings.append(r"\vspace{1em}")
# have to be quite careful about whitespace
latex = f._repr_latex_(subfigure=opts).strip()
#if left:
# latex += '%'
#else:
# latex += r'\newline'
#first = False
#left = not left
strings.append(latex)
strings.append(r"""
\caption{""" + self.caption + r"""}
\label{fig:""" + self.label + r"""}
\end{""" + environment + """}
""")
return "\n".join(strings)
def __repr__(self):
c = self.caption.replace("\n", " ")
strings = ["Figure group: {0} ({1})".format(self.label, c)]
strings += [repr(x) for x in self.figures]
return "\n".join(strings)
def __html__(self):
return ""
class LatexNumberFormatter(object):
"""
Format floats in exponent notation using latex markup for the exponent
e.g., ``$-4.234 \\times 10^{-5}$``
Usage:
>>> fmtr = LatexNumberFormatter(sf=4)
>>> fmtr(-4.234e-5)
"$-4.234 \\\\times 10^{-5}$"
"""
def __init__(self, sf=10):
"""Create a callable object that formats numbers"""
self.sf = sf
self.s_fmt = "{{:.{0}e}}".format(self.sf)
def __call__(self, n):
"""Format `n`"""
n = self.s_fmt.format(n)
n, e, exp = n.partition("e")
if e == "e":
exp = int(exp)
if not n.startswith("-"):
n = r"\phantom{-}" + n
return r"${} \times 10^{{{}}}$".format(n, exp)
else:
return "${}$".format(n)
"""
Simple progressbar
==================
This package implement a unique progress bar class that can be used to decorate
an iterator, a function or even standalone.
The format of the meter is flexible and can display along with the progress
meter, the running time, an eta, and the rate of the iterations.
An example is:
description [----------] k/n 10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec]
"""
class NBPbar(object):
"""
make a progress string in a shape of:
[----------] k/n 10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec]
Attributes
---------
time: bool, optional (default: True)
if set, add the runtime information
eta: bool, optional (default: True)
if set, add an estimated time to completion
rate: bool, optional (default: True)
if set, add the rate information
length: int, optional (default: None)
number of characters showing the progress meter itself
if None, the meter will adapt to the buffer width
TODO: make it variable with the buffer length
keep: bool, optional (default: True)
If not set, deletes its traces from screen after completion
file: buffer
the buffer to write into
mininterval: float (default: 0.5)
minimum time in seconds between two updates of the meter
miniters: int, optional (default: 1)
minimum iteration number between two updates of the meter
units: str, optional (default: 'iters')
unit of the iteration
"""
def __init__(self, desc=None, maxval=None, time=True, eta=True, rate=True, length=None,
file=None, keep=True, mininterval=0.5, miniters=1, units='iters', **kwargs):
self.time = time
self.eta = eta
self.rate = rate
self.desc = desc or ''
self.units = units
self.file = file or sys.stdout
self._last_print_len = 0
self.keep = keep
self.mininterval = mininterval
self.miniters = miniters
self._auto_width = True
self.length = 10
if length is not None:
self.length = length
self._auto_width = False
# backward compatibility
self._start_t = _time.time()
self._maxval = maxval
if 'txt' in kwargs:
self.desc = kwargs['txt']
self._F = None
@staticmethod
def format_interval(t):
""" make a human readable time interval decomposed into days, hours,
minutes and seconds
Parameters
----------
t: int
interval in seconds
Returns
-------
txt: str
string representing the interval
(format: <days>d <hrs>:<min>:<sec>)
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
d, h = divmod(h, 24)
txt = '{m:02d}:{s:02d}'
if h:
txt = '{h:02d}:' + txt
if d:
txt = '{d:d}d ' + txt
return txt.format(d=d, h=h, m=m, s=s)
def build_str_meter(self, n, total, elapsed):
"""
make a progress string in a shape of:
k/n 10% [time: 00:00:00, eta: 00:00:00, 2.7 iters/sec]
Parameters
----------
n: int
number of finished iterations
total: int
total number of iterations, or None
elapsed: int
number of seconds passed since start
Returns
-------
txt: str
string representing the meter
"""
        if total is not None and n > total:
            # finished more iterations than expected; fall back to "unknown total"
            total = None
vals = {'n': n}
vals['elapsed'] = self.format_interval(elapsed)
vals['rate'] = '{0:5.2f}'.format((n / elapsed)) if elapsed else '?'
vals['units'] = self.units
if not total:
txt = '{desc:s} {n:d}'
else:
txt = '{desc:s} {n:d}/{total:d} {percent:s}'
if self.time or self.eta or self.rate:
txt += ' ['
info = []
if self.time:
info.append('time: {elapsed:s}')
if self.eta and total:
info.append('eta: {left:s}')
if self.rate:
info.append('{rate:s} {units:s}/sec')
txt += ', '.join(info) + ']'
        vals['desc'] = self.desc  # set before the early return so '{desc:s}' can always be formatted
        if not total:
            return txt.format(**vals)
        frac = float(n) / total
vals['percent'] = '{0:3.0%}'.format(frac)
vals['left'] = self.format_interval(elapsed / n * (total - n)) if n else '?'
vals['total'] = total
return txt.format(**vals)
def print_status(self, n, total, elapsed):
from IPython.html.widgets import FloatProgress
desc = self.build_str_meter(n, total, elapsed)
if self._F is None:
self._F = FloatProgress(min=0, max=total, description=desc)
display(self._F)
self._F.value = n
self._F.description = desc
def iterover(self, iterable, total=None):
"""
Get an iterable object, and return an iterator which acts exactly like the
iterable, but prints a progress meter and updates it every time a value is
requested.
Parameters
----------
iterable: generator or iterable object
object to iter over.
total: int, optional
the number of iterations is assumed to be the length of the
iterator. But sometimes the iterable has no associated length or
its length is not the actual number of future iterations. In this
case, total can be set to define the number of iterations.
Returns
-------
gen: generator
pass the values from the initial iterator
"""
if total is None:
try:
total = len(iterable)
except TypeError:
total = self._maxval
self.print_status(0, total, 0)
last_print_n = 0
start_t = last_print_t = _time.time()
for n, obj in enumerate(iterable):
yield obj
if n - last_print_n >= self.miniters:
cur_t = _time.time()
if cur_t - last_print_t >= self.mininterval:
self.print_status(n, total, cur_t - start_t)
last_print_n = n
last_print_t = cur_t
if self.keep:
if last_print_n < n:
cur_t = _time.time()
self.print_status(n, total, cur_t - start_t)
self.file.write('\n')
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
return False
def update(self, n, desc=None, total=None):
""" Kept for backward compatibility and the decorator feature """
if total is None:
total = self._maxval
if desc is not None:
self.desc = desc
cur_t = _time.time()
self.print_status(n, total, cur_t - self._start_t)
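# A minimal usage sketch for NBPbar (assumes an IPython notebook session, since
# print_status displays a FloatProgress widget):
#
#   pbar = NBPbar(desc='processing', units='files')
#   for item in pbar.iterover(range(1000)):
#       handle(item)   # hypothetical per-item work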
| mit |
BigTone2009/sms-tools | lectures/09-Sound-description/plots-code/k-means.py | 25 | 1714 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import os, sys
from scipy.cluster.vq import vq, kmeans, whiten
from numpy import random
import pickle
n = 30
features = np.hstack((np.array([np.random.normal(-2,1.1,n), np.random.normal(-2,1.1,n)]), np.array([np.random.normal(2,1.5,n), np.random.normal(2,1.5,n)])))
whitened = np.transpose(features)
nClusters = 2
arr = np.arange(whitened.shape[0])
np.random.shuffle(arr)
seeds = np.array([[-2, 1], [2, -1]])
color = [ 'r', 'c', 'c', 'm']
plt.figure(1, figsize=(9.5, 4))
plt.subplot(1,3,1)
plt.scatter(whitened[:,0],whitened[:,1], c='b', alpha=0.75, s=50, edgecolor='none')
plt.subplot(1,3,2)
clusResults = -1*np.ones(whitened.shape[0])
for ii in range(whitened.shape[0]):
diff = seeds - whitened[ii,:]
diff = np.sum(np.power(diff,2), axis = 1)
indMin = np.argmin(diff)
clusResults[ii] = indMin
for pp in range(nClusters):
plt.scatter(whitened[clusResults==pp,0],whitened[clusResults==pp,1], c=color[pp], alpha=0.75, s=50, edgecolor='none')
plt.scatter(seeds[:,0],seeds[:,1], c=color[:nClusters], alpha=1, s=80)
plt.subplot(1,3,3)
centroids, distortion = kmeans(whitened, seeds, iter=40)
clusResults = -1*np.ones(whitened.shape[0])
for ii in range(whitened.shape[0]):
diff = centroids - whitened[ii,:]
diff = np.sum(np.power(diff,2), axis = 1)
indMin = np.argmin(diff)
clusResults[ii] = indMin
for pp in range(nClusters):
plt.scatter(whitened[clusResults==pp,0],whitened[clusResults==pp,1], c=color[pp], s=50, alpha=0.75, edgecolor='none')
plt.scatter(centroids[:,0],centroids[:,1], c=color[:nClusters], alpha=1, s=80)
plt.tight_layout()
plt.savefig('k-means.png')
plt.show()
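# Note (not in the original script): the manual nearest-centroid loops above can
# be written with scipy.cluster.vq.vq, which is already imported; a sketch:
#
#   codes, dists = vq(whitened, centroids)   # codes[i] = index of closest centroid
#   # codes is expected to agree with clusResults (up to ties in distance)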
| agpl-3.0 |
anl-polaris/polaris-analyser | polaris_dialog.py | 2 | 16339 | from PyQt4 import QtCore, QtGui, QtSql
from ui_polaris import Ui_Polaris
from geo_tools import *
from qgis.core import *
from qgis.utils import *
import os
import matplotlib
import numpy as np
import pylab as P
import random
import math
import mpl
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
from db_plot import Ui_DBPlot
from db_plot1 import Ui_DBPlot1
from ui_time_span import Ui_timeSpanDialog
import re
class CreateLayerTread(QtCore.QThread):
partDone = QtCore.pyqtSignal(int)
allDone = QtCore.pyqtSignal()
def __init__(self, db_path, start_time, end_time):
QtCore.QThread.__init__(self)
self.db_path = db_path
self.n = 1000000
self.count = 0
self.start_time = start_time
self.end_time = end_time
QtGui.QMessageBox.about(None, "Generating Layer", "Start time: %s\nEnd time: %s"%(str(self.start_time), str(self.end_time)))
def status(self):
self.count += 1
self.partDone.emit(self.count*self.n)
#self.emit( QtCore.SIGNAL('update(QString)'), str(self.count*self.n) )
def run(self):
self.partDone.emit(0)
#self.conn = sqlite3.connect(fileName)
self.conn = sqlite3.connect(self.db_path)
#QtGui.QMessageBox.about(self, "My message box", "Connected")
#QtGui.Message.about(self, "1", "2")
self.partDone.emit(2)
c = sqlite3.connect('../test_case/chicago-Result.sqlite')
c.enable_load_extension(1)
self.partDone.emit(3)
c.load_extension('./spatialite4.dll')
self.partDone.emit(4)
if int(c.execute("select CheckSpatialMetaData()").fetchone()[0] ) != 3:
c.execute("select InitSpatialMetaData()")
self.partDone.emit(5)
c.execute("select DiscardGeometryColumn('link_moe', 'GEO')")
self.partDone.emit(6)
c.execute("drop table if exists link_moe")
self.partDone.emit(7)
c.commit()
self.partDone.emit(8)
self.conn.execute("attach database \'../test_case/chicago-Result.sqlite\' as res")
#self.conn.execute("create table res.link_moe as select GEO, link, type, fspd_ab, fspd_ba, lanes_ab, lanes_ba from link")
self.conn.set_progress_handler(self.status, self.n)
c.set_progress_handler(self.status, self.n)
self.partDone.emit(9)
self.conn.execute("""create table res.link_moe as
select
GEO,
link_uid,
avg(link_travel_time) as tt,
avg(link_queue_length) as link_queue_length,
avg(link_travel_delay) as link_travel_delay,
avg(link_speed) as link_speed,
avg(link_density) as link_density,
avg(link_in_flow_rate) as link_in_flow_rate,
avg(link_out_flow_rate) as link_out_flow_rate,
avg(link_in_volume) as link_in_volume,
avg(link_out_volume) as link_out_volume,
avg(link_speed_ratio) as link_speed_ratio,
avg(link_in_flow_ratio) as link_in_flow_ratio,
avg(link_out_flow_ratio) as link_out_flow_ratio,
avg(link_density_ratio) as link_density_ratio,
avg(link_travel_time_ratio) as link_travel_time_ratio,
avg(num_vehicles_in_link) as num_vehicles_in_link,
avg(link_speed) as link_speed
from LinkMOE, link
where start_time < ? and start_time > ? and link.link == LinkMOE.link_uid/2 and lanes_ab > 0
group by link_uid""", (self.end_time, self.start_time,))
self.conn.commit()
c.execute("select RecoverGeometryColumn('link_moe', 'GEO', 26916, 'LINESTRING', 'XY')")
c.commit()
c.close()
self.allDone.emit()
return
class DBPlot1(QtGui.QWidget):
def __init__(self, conn):
QtGui.QWidget.__init__(self)
self.ui = Ui_DBPlot1()
self.ui.setupUi(self)
self.ui.plotButton.clicked.connect(self.plot_xy)
self.fig = mpl.MyMplCanvas(self, width=6, height=5, dpi=100)
self.ui.verticalLayout.addWidget(self.fig.mpl_toolbar)
self.ui.verticalLayout.addWidget(self.fig)
self.conn = conn
self.ui.whereText.setPlainText("type where statement here...")
ind = 0
res = self.conn.execute('SELECT name FROM sqlite_master WHERE type=\'table\' and rootpage < 10000 and rootpage!=0 and name NOT LIKE (\'sqlite%\') order by name ').fetchall()
for item in res:
self.ui.comboTable.insertItem(ind,item[0])
ind += 1
res = self.conn.execute('SELECT name FROM res.sqlite_master WHERE type=\'table\' and rootpage < 10000 and rootpage!=0 and name NOT LIKE (\'sqlite%\') order by name ').fetchall()
for item in res:
self.ui.comboTable.insertItem(ind,item[0])
ind += 1
QtCore.QObject.connect(self.ui.comboTable, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.populate_xy)
def populate_xy(self, table):
res = self.conn.execute('pragma table_info(%s)'%table).fetchall()
ind = 0
self.ui.comboX.clear()
self.ui.comboY.clear()
for item in res:
if item[2] not in ["INTEGER", "REAL"]:
continue
self.ui.comboX.insertItem(ind, item[1])
self.ui.comboY.insertItem(ind, item[1])
ind += 1
pass
def plot_xy(self, text):
self.fig.axes.clear()
xcol = self.ui.comboX.currentText()
ycol = self.ui.comboY.currentText()
table = self.ui.comboTable.currentText()
res = self.conn.execute('select %s, %s from %s where %s'%(xcol, ycol, table, str(self.ui.whereText.toPlainText()))).fetchall()
x = []
y = []
for item in res:
x.append(item[0])
y.append(item[1])
self.fig.axes.plot(x,y,'*')
self.fig.axes.set_xlabel(str(xcol).capitalize())
self.fig.axes.set_ylabel(str(ycol).capitalize())
self.fig.draw()
class TimeSpanDialog(QtGui.QDialog):
ok_signal = QtCore.pyqtSignal(int, int)
def __init__(self):
QtGui.QDialog.__init__(self)
self.ui = Ui_timeSpanDialog()
self.ui.setupUi(self)
QtCore.QObject.connect(self.ui.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), self.accept)
def accept(self):
start = self.ui.timeStart.time()
end = self.ui.timeEnd.time()
start_sec = start.hour()*60*60 + start.minute()*60 + start.second()
end_sec = end.hour()*60*60 + end.minute()*60 + end.second()
#QtGui.QMessageBox.about(self, "Debug", str(start_sec))
#QtGui.QMessageBox.about(self, "Debug", str(end_sec))
self.ok_signal.emit(start_sec,end_sec)
self.close()
#DD MATPLOTLIB TO THE db plot widget
class DBPlot(QtGui.QWidget):
def __init__(self, conn):
QtGui.QWidget.__init__(self)
self.ui = Ui_DBPlot()
self.ui.setupUi(self)
self.ui.plotButton.clicked.connect(self.plot_xy)
self.fig = mpl.MyMplCanvas(self, width=6, height=5, dpi=100)
self.ui.verticalLayout.addWidget(self.fig.mpl_toolbar)
self.ui.verticalLayout.addWidget(self.fig)
self.conn = conn
def plot_xy(self, text):
self.fig.axes.clear()
t = str(self.ui.sqlText.toPlainText())
m = re.search("\s*select\s*(\w+)\s*\,\s*(\w+)", t)
xlabel = m.group(1)
ylabel = m.group(1)
res = self.conn.execute(t).fetchall()
x = []
y = []
for item in res:
x.append(item[0])
y.append(item[1])
self.fig.axes.plot(x,y,'*')
self.fig.axes.set_xlabel(str(xlabel).capitalize())
self.fig.axes.set_ylabel(str(ylabel).capitalize())
self.fig.draw()
# create the dialog for the plugin builder
class PolarisDialog(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.conn = None
self.tabs = {}
self.layouts = {}
# Set up the user interface from Designer.
self.ui = Ui_Polaris()
self.ui.setupUi(self)
self.ver_progress = QtGui.QHBoxLayout(self)
self.progress_bar = QtGui.QProgressBar()
self.progress_bar.setMinimum(0)
self.progress_bar.setMaximum(100)
self.ui.statusbar.addWidget(self.progress_bar)
self.status_message = QtGui.QLabel("")
self.status_message.setMinimumSize(QtCore.QSize(80,20))
self.ui.statusbar.addWidget(self.status_message)
self.ui.actionConnect_TestCase.triggered.connect(self.connect)
self.ui.actionDraw.triggered.connect(self.draw_tt)
self.ui.actionQuit.triggered.connect(self.fileQuit)
self.ui.actionNew_XY_DB_Plot.triggered.connect(self.db_plot)
self.ui.actionNew_XY_DB_Plot_1.triggered.connect(self.db_plot_1)
self.ui.actionTravel_Time_Layer.triggered.connect(self.crate_new_layer)
self.connect()
self.draw_all()
self.db_table(self.dbfileName,"1", "County/Class VMT", "select county as COUNTY, link_type as TYPE, sum(link_vmt) as VMT from link_vmt group by county, link_type order by county, link_type")
self.db_table(self.dbfileName,"3", "Class VMT","select link_type as TYPE, sum(link_vmt) as VMT from link_vmt group by link_type order by link_type")
self.db_table(self.dbfileName,"5", "County VMT", "select county as COUNTY, sum(link_vmt) as VMT from link_vmt group by county")
def fileQuit(self):
self.close()
def create_tab(self,tab_tag, tab_name):
self.tabs[tab_tag] = QtGui.QWidget()
self.tabs[tab_tag].setObjectName(_fromUtf8(tab_name))
layout_tag = tab_tag+"Layout"
self.layouts[layout_tag] = QtGui.QVBoxLayout(self.tabs[tab_tag])
self.layouts[layout_tag].setObjectName(_fromUtf8(layout_tag))
self.ui.tabWidget.addTab(self.tabs[tab_tag], _fromUtf8(tab_name))
def db_table(self,dbfileName, tag, name,sql):
self.create_tab(tag, name)
tv = QtGui.QTableView(self)
tv.setObjectName(_fromUtf8(tag+"tableView"))
self.layouts[tag+"Layout"].addWidget(tv)
db = QtSql.QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName(dbfileName)
QtGui.QMessageBox.about(None, "POLARIS", dbfileName)
if not db.open():
            QtGui.QMessageBox.warning(None, "POLARIS",
                                      "Database Error: %s" % db.lastError().text())
model = QtSql.QSqlTableModel(None, db)
sql_prepared = QtSql.QSqlQuery(sql)
model.setQuery(sql_prepared)
#model.setTable("Link")
model.select()
#QtGui.QMessageBox.about(None,"POLARIS", "Statement: "+model.selectStatement())
#QtGui.QMessageBox.about(None,"POLARIS", "Name: "+self.dbfileName)
tv.setModel(model)
#tv.show()
del db
return tv
def db_plot(self):
self.create_tab("db_plot", "DB Plot")
dbp = DBPlot(self.conn)
self.layouts["db_plotLayout"].addWidget(dbp)
def db_plot_1(self):
self.create_tab("db_plot_1", "DB Plot 1")
dbp = DBPlot1(self.conn)
self.layouts["db_plot_1Layout"].addWidget(dbp)
def load_recent(self):
home = os.path.expanduser("~")
home += "/.polaris"
if not os.path.exists(home):
os.makedirs(home)
with open(home+'/recent', 'w') as fh: #create empty file
pass
        self.recent_db = set()
        with open(home+'/recent', 'r') as fh:
            # the original body referenced an undefined `item`; read one entry per line
            for item in fh:
                self.recent_db.add(item.strip())
def draw_tt(self):
x = []
y = []
tt = self.conn.execute("select * from TTime_Distribution order by TTime_minutes").fetchall()
for item in tt:
tt,count = item
x.append(int(tt))
y.append(int(count))
#y.append(math.log10(int(count)))
self.create_tab("ttTab", "Travel Time")
fig = mpl.MyMplCanvas(self.tabs["ttTab"], width=5, height=4, dpi=100)
self.layouts["ttTabLayout"].addWidget(fig.mpl_toolbar)
self.layouts["ttTabLayout"].addWidget(fig)
fig.axes.plot(x,y, '*-')
fig.axes.set_xlabel("Trevel Time (min)")
fig.axes.set_ylabel("Vehicle Count")
def treat_null(self, value):
if value is None:
return 0
else:
return value
def draw_mode(self):
data = self.conn.execute("select * from Mode_Distribution").fetchall()
types = []
autos = []
transits = []
for item in data:
type, auto, transit, total = map(self.treat_null, item)
type = type[0]
autos.append(auto)
types.append(type)
transits.append(transit)
self.create_tab("atTab", "Activity Type")
fig = mpl.MyMplCanvas(self.tabs["atTab"], width=5, height=4, dpi=100)
self.layouts["atTabLayout"].addWidget(fig.mpl_toolbar)
self.layouts["atTabLayout"].addWidget(fig)
N = len(types)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig.axes.set_xticks(ind+width)
fig.axes.set_xticklabels( types )
ar = fig.axes.bar(ind, autos, width, color='r')
tr = fig.axes.bar(ind+width, transits, width, color='y')
fig.axes.legend( (ar[0], tr[0]), ('Auto', 'Transit') )
fig.axes.set_xlabel("Activity Type")
fig.axes.set_ylabel("Trip Count")
def draw_all(self):
self.draw_tt()
self.draw_mode()
def connect(self):
fileName = "D:\\proj\\polaris\\analyser\\\data\\chicago-Supply.sqlite"
if not os.path.exists(fileName):
fileName = QtGui.QFileDialog.getOpenFileName(None, "Open Database", ".", "Image Files (*.db *.sqlite)");
self.dbfileName = fileName
if (fileName == ''):
QtGui.QMessageBox.about(None,"POLARIS", "No file was selected. The plug-in will not start")
return
db_dir = os.path.dirname(fileName)
#os.environ['PATH'] = 'C:\\opt\\polarisdeps\\spatialite\\Win32' + ';' + os.environ['PATH']
self.conn = sqlite3.connect(fileName)
#self.conn = sqlite3.connect("../test_case/chicago-Supply.sqlite")
self.conn.enable_load_extension(1)
self.conn.load_extension('spatialite.dll')
self.conn.execute("attach database \'%s/chicago-Demand.sqlite\' as Demand"%db_dir)
self.conn.execute("attach database \'%s/chicago-Result.sqlite\' as res"%db_dir)
#QtGui.QMessageBox.about(self, "My message box", "Connected")
        self.conn_message = QtGui.QLabel("Connected to %s" % fileName)
self.conn_message.setMinimumSize(self.conn_message.sizeHint())
self.ui.statusbar.addWidget(self.conn_message)
def update_status_message(self, val):
self.status_message.setText(str(val))
self.repaint()
def add_layer_to_canvas(self):
self.update_status_message(-999)
uri = QgsDataSourceURI()
uri.setDatabase('../test_case/chicago-Result.sqlite')
schema = ''
table = 'link_moe'
geom_column = 'GEO'
uri.setDataSource(schema, table, geom_column)
display_name = 'Link MOE'
vlayer = QgsVectorLayer(uri.uri(), display_name, 'spatialite')
QgsMapLayerRegistry.instance().addMapLayer(vlayer)
def start_generating_layer(self, start, end):
self.update_status_message(-10)
self.th = CreateLayerTread("../test_case/chicago-Supply.sqlite", start, end)
self.update_status_message(-9)
#QtCore.QObject.connect( self.th, QtCore.SIGNAL("update(QString)"), self.update_status_message )
self.th.partDone.connect(self.update_status_message)
self.th.allDone.connect(self.add_layer_to_canvas)
self.update_status_message(-8)
self.th.start()
def crate_new_layer(self):
self.tsd = TimeSpanDialog()
self.tsd.ok_signal.connect(self.start_generating_layer)
self.tsd.exec_()
| bsd-3-clause |
elijah513/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))  # one noise value per sample
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
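# A possible extension (not part of the original example): choose alpha by
# cross-validation instead of fixing it, e.g.
#
#   from sklearn.linear_model import LassoCV
#   lasso_cv = LassoCV(cv=5).fit(X_train, y_train)
#   print("alpha chosen by CV: %f" % lasso_cv.alpha_)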
| bsd-3-clause |
t-davidson/command-line-grading | embedding.py | 1 | 4306 | """File to test out neural embeddings"""
import pickle
import numpy as np
import pandas as pd
import string
from nltk.util import ngrams
from gensim import utils
from gensim.models import doc2vec
from sklearn.linear_model import LogisticRegression
from random import seed
from random import shuffle
from paper_classifier import *
from sklearn import linear_model
from sklearn.multiclass import OneVsRestClassifier
from sklearn.feature_selection import SelectFromModel, SelectKBest, chi2
#Tag doc2vec.TaggedDocument(bow, [count])
#Access model.docvecs[count]
def clean_text(text):
text = ''.join(ch for ch in text if ch not in string.punctuation)
return text.lower()
def add_bigrams(tokens):
    # materialize the bigrams before appending: ngrams() is lazy, so extending
    # `tokens` while iterating over it would keep producing new "bigrams"
    bigrams = list(ngrams(tokens, 2))
    for pair in bigrams:
        bigram = pair[0] + ' ' + pair[1]
        tokens.append(bigram)
    return tokens
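# Quick illustration of add_bigrams on a toy token list (hypothetical input):
#   add_bigrams(['the', 'heat', 'flows'])
#   -> ['the', 'heat', 'flows', 'the heat', 'heat flows']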
def doc_iterator(df):
"""Parses text documents from the essay field of the
dataframe, cleans text, tokenizes, and returns it
as an iterator"""
for i in range(0, df.shape[0]):
yield clean_text(df.essay.iloc[i]).split()
#tokens = clean_text(df.essay.iloc[i]).split()
#tokens = add_bigrams(tokens)
#yield tokens
###Runs out of memory if bigrams included!
def tagged_iterator(text_iterator):
"""Processes texts in the doc_iterator and returns
an iterator of tagged documents"""
count=0
for bow in text_iterator:
if len(bow) > 0:
yield doc2vec.TaggedDocument(bow, [count])
count += 1
print count-1
def docs_shuffle(iterator):
"""Shuffles the iterator"""
list_of_docs = []
for i in iterator:
list_of_docs.append(i)
shuffle(list_of_docs)
for d in list_of_docs:
yield d
def build_X(df, model, size):
X = np.zeros((df.shape[0], size))
for i in range(0, df.shape[0]):
col = model.docvecs[i]
X[i] = col
return pd.DataFrame(X)
def LogisticRegWithSelection(X, y, threshold):
#First model to select best features
model = linear_model.LogisticRegression(C=1.0, penalty='l1',
class_weight='balanced')
kfold(X, y, model, 5, False)
#y_pred = model.predict(X)
#print y_pred
SFM = SelectFromModel(model, prefit=True,threshold=threshold)
X_new = SFM.transform(X)
#Second model to run on reduced feature set
model2 = linear_model.LogisticRegression(C=1.0, penalty='l2',
class_weight='balanced')
kfold(pd.DataFrame(X_new), y, model2, 5, False)
y_pred = model2.predict(X_new)
print y_pred
def LogisticRegWithOVR(X, y):
###Performs badly with high dim vector
model = linear_model.LogisticRegression(C=10.0, penalty='l1',
class_weight='balanced')
P = OneVsRestClassifier(model)
kfold(X, y, P, 5, True)
#y_pred = P.predict(X)
#print y_pred
if __name__ == '__main__':
df = pickle.load(open('week4_model_table.p', 'rb'))
df = df[df.essay != ''] #these conditions filter essays w/o content
df = df[df.essay != ' ']
df = df[df.grade != 70] #A mislabelled entry
df = df[df.grade != 0] #Remove zero entries
df = df[df.grade != 66] ##Remove ungraded (for now)
print df.shape
docs = doc_iterator(df)
tagged = tagged_iterator(docs)
#tagged = docs_shuffle(tagged) #shuffle order of tagged
size=10000
####Odd, when I don't do feature selection I get junk if size > 100
###but when I do feature selection I get better results with larger size
model = doc2vec.Doc2Vec(
tagged,
size=size,
min_count=3,
workers=4,
iter=20,
)
#for epoch in range(100): ###This appears to make no difference
# seed(randint(0,100))
# tagged = docs_shuffle(tagged)
# model.train(tagged)
print model.most_similar('heat')
model.save('doc2vecmodel')
#print model.docvecs[767]
#model.build_vocab(tagged) #I think my code does this by including tagged in model spec
#Running multiclass classifier
X = build_X(df, model, size)
    section = df.section
    X.section = section  # note: attribute assignment does not add a column; use X['section'] = section.values to include it as a feature
y = df.grade
LogisticRegWithOVR(X, y)
#Single class
y = df.excellent
LogisticRegWithSelection(X, y, 'mean')
| gpl-3.0 |
mrcslws/htmresearch | projects/associative_network/run_hopfield_network_experiment.py | 11 | 15603 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Experiment with associative network that uses SDRs
Two experiments are included in this script
1. Capacity experiment: How many unique items can a network store such that
each item can be reliably retrieved?
2. Simultaneously retrieve multiple items by relaxing the sparsity
"""
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
plt.ion()
plt.close('all')
class hyperColumnNetwork(object):
def __init__(self,
numHyperColumn,
numNeuronPerHyperColumn,
numActiveNeuronPerHyperColumn,
numInputs,
minThreshold=0,
matchThreshold=10):
self.numHyperColumn = numHyperColumn
self.numNeuronPerHyperColumn = numNeuronPerHyperColumn
self.numActiveNeuronPerHyperColumn = numActiveNeuronPerHyperColumn
self.numInputs = numInputs
self.minThreshold = minThreshold
self.matchThreshold = matchThreshold
self.numNeuronTotal = numHyperColumn * numNeuronPerHyperColumn
# initialize weight matrix
self.weightFF = np.eye(self.numNeuronTotal, numInputs)
self.weightRecurrent = np.zeros((self.numNeuronTotal, self.numNeuronTotal))
def initializeObjectSDRs(self, numObjects, seed=1):
# initialize object SDRs in HC
np.random.seed(seed)
objectSDRActiveBits = []
for i in range(numObjects):
objectSDRActiveBits.append([])
for j in range(self.numHyperColumn):
randomCells = np.random.permutation(range(self.numNeuronPerHyperColumn))
objectSDRActiveBits[i].append(
randomCells[:self.numActiveNeuronPerHyperColumn])
return objectSDRActiveBits
def memorizeObjectSDRs(self, objectSDRActiveBits):
numObjects = len(objectSDRActiveBits)
# initialize recurrent connections
self.weightRecurrent = np.zeros((self.numNeuronTotal, self.numNeuronTotal))
for i in range(numObjects):
offset = 0
objectSDR = np.zeros((self.numNeuronTotal, 1))
for j in range(self.numHyperColumn):
objectSDR[offset+objectSDRActiveBits[i][j], 0] = 1
offset += self.numNeuronPerHyperColumn
self.weightRecurrent += np.dot(objectSDR, np.transpose(objectSDR))
for i in range(self.numNeuronTotal):
self.weightRecurrent[i, i] = 0
def run(self, initialState, feedforwardInputs):
"""
Run network for multiple steps
:param initialState:
:param feedforwardInputs: list of feedforward inputs
:return: list of active cell indices over time
"""
currentState = initialState
activeStateHistory = [np.where(initialState > 0)[0]]
numStep = len(feedforwardInputs)
for i in range(numStep):
currentState = self.runSingleStep(currentState,
feedforwardInputs[i])
activeStateHistory.append([np.where(currentState > 0)[0]])
return activeStateHistory
def runSingleStep(self,
previousState,
feedforwardInputs):
"""
Run network for one step
:param previousState: a (Ncell, 1) numpy array of network states
:param maxNumberOfActiveCellsPerColumn: maximum number of active cells per
column
:return: newState
"""
print "previous activeCells ", np.sort(np.where(previousState>0)[0])
feedforwardInputOverlap = np.dot(self.weightFF, feedforwardInputs)
lateralInputOverlap = np.dot(self.weightRecurrent, previousState)
totalInput = feedforwardInputOverlap + lateralInputOverlap
print "feedforwardInputOverlap: ", np.sort(np.where(feedforwardInputOverlap>0)[0])
# cells with active feedforward zone
feedforwardActive = feedforwardInputOverlap > self.minThreshold
# cells with active distal zone (that receives lateral connections)
lateralActive = lateralInputOverlap > self.minThreshold
# cells with both active feedforward zone and lateral zone
strongActive = np.logical_and(feedforwardActive, lateralActive)
newState = np.zeros((self.numNeuronTotal, 1))
offset = 0
for i in range(self.numHyperColumn):
numberOfStrongActiveCellsInColumn = np.sum(
strongActive[offset:offset+self.numNeuronPerHyperColumn])
print "numberOfStrongActiveCellsInColumn: ", numberOfStrongActiveCellsInColumn
if numberOfStrongActiveCellsInColumn > self.matchThreshold:
self.numActiveNeuronPerHyperColumn = self.numActiveNeuronPerHyperColumn/2
w = self.numActiveNeuronPerHyperColumn
cellIdx = np.argsort(totalInput[offset:offset+self.numNeuronPerHyperColumn, 0])
activeCells = cellIdx[-w:] + offset
activeCells = activeCells[np.where(
totalInput[activeCells] > self.minThreshold)[0]]
newState[activeCells] = 1
print "activeCells ", np.sort(activeCells)
offset += self.numNeuronPerHyperColumn
return newState
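# The recurrent matrix built in memorizeObjectSDRs is a Hopfield-style Hebbian
# sum of outer products with the diagonal zeroed. A toy sketch of that storage
# and recall rule, independent of the class above (illustrative only):
#
#   p1 = np.array([[1, 0, 1, 0]]).T
#   p2 = np.array([[0, 1, 1, 0]]).T
#   W = np.dot(p1, p1.T) + np.dot(p2, p2.T)
#   np.fill_diagonal(W, 0)
#   cue = np.array([[1, 0, 0, 0]]).T       # partial cue for p1
#   support = np.dot(W, cue)               # largest for the other active bit of p1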
def convertActiveCellsToSDRs(activeStateHistory, numCells):
"""
Convert list of active cell indices to a list of SDRs
:param activeStateHistory: list of active cell indices per step
:param numCells: total number of cells
:return: sdrHistory numpy array of (numStep, numCells)
"""
numStep = len(activeStateHistory)
sdrHistory = np.zeros((numStep, numCells))
for i in range(numStep):
sdrHistory[i, activeStateHistory[i]] = 1
return sdrHistory
def stripSDRHistoryForDisplay(sdrHistory, removePortion=0.5):
"""
Strip SDR History (remove unused bits) for display purpose
:param sdrHistory:
:return: displayBitIndex
"""
sdrHistorySum = np.sum(sdrHistory, axis=0)
unusedBitIndices = np.where(sdrHistorySum == 0)[0]
usedBitIndices = np.where(sdrHistorySum > 1)[0]
numUnusedBitKeep = int(len(unusedBitIndices) * (1-removePortion))
unusedBitIndices = np.random.permutation(unusedBitIndices)
unusedBitIndices = unusedBitIndices[:numUnusedBitKeep]
displayBitIndex = np.concatenate((usedBitIndices, unusedBitIndices))
displayBitIndex = np.sort(displayBitIndex)
return displayBitIndex
def generateSDRforDisplay(numNeuron, activeBits, displayBitIndex):
sdrForDisplay = np.zeros((1, numNeuron))
sdrForDisplay[0, activeBits] = 1
sdrForDisplay = np.matlib.repmat(sdrForDisplay[:, displayBitIndex], 10, 1)
return sdrForDisplay
def runSingleExperiment(numObjects, numBitNoise, seed=10):
np.random.seed(seed)
hcNet = hyperColumnNetwork(numHyperColumn=1,
numNeuronPerHyperColumn=1024,
numActiveNeuronPerHyperColumn=20,
numInputs=1024)
objectSDRActiveBits = hcNet.initializeObjectSDRs(numObjects=numObjects,
seed=seed)
hcNet.memorizeObjectSDRs(objectSDRActiveBits)
objectIDTest = np.random.choice(numObjects, 100)
finalOverlapList = []
for objectID in objectIDTest:
initialState = np.zeros((hcNet.numNeuronTotal, 1))
randomCells = np.random.permutation(range(hcNet.numNeuronTotal))
initialState[objectSDRActiveBits[objectID][0][:(20-numBitNoise)]] = 1
initialState[randomCells[:numBitNoise]] = 1
feedforwardInputs = [np.zeros((hcNet.numNeuronTotal, 1))] * 5
activeStateHistory = hcNet.run(initialState, feedforwardInputs)
sdrHistory = convertActiveCellsToSDRs(activeStateHistory,
hcNet.numNeuronTotal)
initialActiveCells = np.where(sdrHistory[0, :] > 0)[0]
finalActiveCells = np.where(sdrHistory[-1, :] > 0)[0]
finalOverlap = len(
set(objectSDRActiveBits[objectID][0]).intersection(finalActiveCells))
initialOverlap = len(
set(objectSDRActiveBits[objectID][0]).intersection(initialActiveCells))
finalOverlapList.append(finalOverlap)
return finalOverlapList
def capacityExperiment():
numObjectList = np.linspace(start=100, stop=2000, num=10).astype('int')
numBitNoiseList = [2, 4, 8, 10, 15]
numRpts = 3
avgFinalOverlap = np.zeros(
(numRpts, len(numBitNoiseList), len(numObjectList)))
for i in range(len(numBitNoiseList)):
for j in range(len(numObjectList)):
for rpt in range(3):
print "run experiment with object # {} noise # {} rpt {}".format(
numObjectList[j], numBitNoiseList[i], rpt
)
finalOverlap = runSingleExperiment(numObjectList[j],
numBitNoiseList[i], seed=rpt)
avgFinalOverlap[rpt, i, j] = (np.mean(finalOverlap))
plt.figure()
finalOverlaps = np.mean(avgFinalOverlap, 0)
legendList = []
for i in range(len(numBitNoiseList)):
plt.plot(numObjectList, finalOverlaps[i, :])
legendList.append("noise = {}".format(numBitNoiseList[i]))
plt.legend(legendList)
plt.plot([140, 140], [0, 20], 'k--')
plt.xlabel('Number of Object')
plt.ylabel('Overlap(retrieved sdr, original sdr)')
plt.savefig('capacity_experiment_result.pdf')
def retrieveMultipleItems():
hcNet = hyperColumnNetwork(numHyperColumn=1,
numNeuronPerHyperColumn=1024,
numActiveNeuronPerHyperColumn=20,
numInputs=1024,
minThreshold=0)
numObjects = 100
objectSDRActiveBits = hcNet.initializeObjectSDRs(numObjects=numObjects,
seed=42)
hcNet.memorizeObjectSDRs(objectSDRActiveBits)
hcNet.numActiveNeuronPerHyperColumn = 40
objectID1 = 1
objectID2 = 2
ambiguousInput = np.zeros((hcNet.numNeuronTotal, 1))
ambiguousInput[objectSDRActiveBits[objectID1][0][:10]] = 10
ambiguousInput[objectSDRActiveBits[objectID2][0][:10]] = 10
nStep = 20
feedforwardInputs = [ambiguousInput]
for i in range(1, nStep):
feedforwardInputs.append(np.zeros((hcNet.numNeuronTotal, 1)))
feedforwardInputs[10][objectSDRActiveBits[objectID1][0]] = 1
initialState = np.zeros((hcNet.numNeuronTotal, 1))
# initialState = ambiguousInput
activeStateHistory = hcNet.run(initialState, feedforwardInputs)
sdrHistory = convertActiveCellsToSDRs(activeStateHistory,
hcNet.numNeuronTotal)
displayBitIndex = stripSDRHistoryForDisplay(sdrHistory, removePortion=0.9)
initialActiveCells = np.where(sdrHistory[0, :] > 0)[0]
print initialActiveCells
finalActiveCells = np.where(sdrHistory[-1, :] > 0)[0]
initialOverlap1 = len(
set(objectSDRActiveBits[objectID1][0]).intersection(initialActiveCells))
initialOverlap2 = len(
set(objectSDRActiveBits[objectID2][0]).intersection(initialActiveCells))
finalOverlap1 = len(
set(objectSDRActiveBits[objectID1][0]).intersection(finalActiveCells))
finalOverlap2 = len(
set(objectSDRActiveBits[objectID2][0]).intersection(finalActiveCells))
print "Initial overlap with object SDR 1: {}".format(initialOverlap1)
print "Initial overlap with object SDR 2: {}".format(initialOverlap2)
print "Final overlap with object SDR 1: {}".format(finalOverlap1)
print "Final overlap with object SDR 2: {}".format(finalOverlap2)
fig, ax = plt.subplots(nrows=4, ncols=1)
object1SDR = generateSDRforDisplay(hcNet.numNeuronTotal,
objectSDRActiveBits[objectID1],
displayBitIndex)
object2SDR = generateSDRforDisplay(hcNet.numNeuronTotal,
objectSDRActiveBits[objectID2],
displayBitIndex)
querySDR = np.matlib.repmat(np.transpose(ambiguousInput[displayBitIndex]), 10, 1)
ax[0].imshow(object1SDR, cmap='gray')
ax[0].set_title('SDR for Object A')
ax[1].imshow(object2SDR, cmap='gray')
ax[1].set_title('SDR for Object B')
ax[2].imshow(querySDR, cmap='gray')
ax[2].set_title('query SDR')
ax[3].imshow(sdrHistory[:, displayBitIndex], cmap='gray')
ax[3].set_title('Network states over time')
plt.savefig('figures/retrieveMultipleItems.pdf')
def multipleHyperColumn():
hcNet = hyperColumnNetwork(numHyperColumn=3,
numNeuronPerHyperColumn=1024,
numActiveNeuronPerHyperColumn=20,
numInputs=1024*3,
minThreshold=0)
numObjects = 10
objectSDRActiveBits = hcNet.initializeObjectSDRs(numObjects=numObjects,
seed=40)
hcNet.memorizeObjectSDRs(objectSDRActiveBits)
initialState = np.zeros((hcNet.numNeuronTotal, 1))
objectID1 = 1
offset = 0
ambiguousInput = np.zeros((hcNet.numNeuronTotal, 1))
for i in range(hcNet.numHyperColumn):
if i != 1:
ambiguousInput[offset + objectSDRActiveBits[objectID1][i][:20]] = 1
# initialState[offset + objectSDRActiveBits[objectID2][i][:10]] = 1
offset += hcNet.numNeuronPerHyperColumn
nStep = 10
feedforwardInputs = [ambiguousInput]
for i in range(1, nStep):
feedforwardInputs.append(np.zeros((hcNet.numNeuronTotal, 1)))
hcNet.numActiveNeuronPerHyperColumn = 20
activeStateHistory = hcNet.run(initialState, feedforwardInputs)
sdrHistory = convertActiveCellsToSDRs(activeStateHistory,
hcNet.numNeuronTotal)
offset = 0
plt.figure()
fig, ax = plt.subplots(3, 3)
for i in range(hcNet.numHyperColumn):
activationColumnI = sdrHistory[:, offset:(offset+hcNet.numNeuronPerHyperColumn)]
initialOverlap1 = len(
set(objectSDRActiveBits[objectID1][i]).intersection(set(np.where(activationColumnI[0, :]>0)[0])))
finalOverlap1 = len(
set(objectSDRActiveBits[objectID1][i]).intersection(set(np.where(activationColumnI[-1, :]>0)[0])))
print "initial Overlap with column {} : {}".format(i, initialOverlap1)
print "final Overlap with column {} : {}".format(i, finalOverlap1)
displayBitIndex = stripSDRHistoryForDisplay(activationColumnI, removePortion=0.9)
displayBitIndex = displayBitIndex[:30]
object1SDR = generateSDRforDisplay(hcNet.numNeuronPerHyperColumn,
objectSDRActiveBits[objectID1][i],
displayBitIndex)
ffInput = np.zeros((nStep, hcNet.numNeuronPerHyperColumn))
for s in range(nStep):
ffInput[s, :] = feedforwardInputs[s][offset:(offset+hcNet.numNeuronPerHyperColumn)][:, 0]
ax[0, i].imshow(object1SDR, cmap='gray')
ax[0, i].set_title('Module {}'.format(i))
ax[1, i].imshow(ffInput[:, displayBitIndex], cmap='gray')
ax[1, i].set_ylabel('ffInput')
ax[2, i].imshow(activationColumnI[:, displayBitIndex], cmap='gray')
ax[2, i].set_ylabel('Network state')
offset += hcNet.numNeuronPerHyperColumn
plt.savefig('figures/experimentMultipleModules.pdf')
if __name__ == "__main__":
multipleHyperColumn()
| agpl-3.0 |
ThomasBrouwer/BNMTF | code/models/distributions/gamma.py | 1 | 1316 | """
Class representing a gamma distribution, allowing us to sample from it,
and compute the expectation and the expectation of the log.
"""
import math
from scipy.special import psi as digamma
from numpy.random import gamma
# Gamma draws
def gamma_draw(alpha,beta):
shape = float(alpha)
scale = 1.0 / float(beta)
return gamma(shape=shape,scale=scale,size=None)
# Gamma expectation
def gamma_expectation(alpha,beta):
alpha, beta = float(alpha), float(beta)
return alpha / beta
# Gamma expectation of the log
def gamma_expectation_log(alpha,beta):
alpha, beta = float(alpha), float(beta)
return digamma(alpha) - math.log(beta)
# Gamma mode
def gamma_mode(alpha,beta):
alpha, beta = float(alpha), float(beta)
return (alpha-1) / beta
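# Quick sanity check of the rate parameterisation (illustrative): for alpha=3,
# beta=2 the expectation is 3/2 = 1.5 and the mode is (3-1)/2 = 1.0, e.g.
#
#   assert abs(gamma_expectation(3, 2) - 1.5) < 1e-12
#   assert abs(gamma_mode(3, 2) - 1.0) < 1e-12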
'''
# Do 1000 draws and plot them
import matplotlib.pyplot as plt
import scipy.special as sps
import numpy as np
shape, scale = 2., 2. # mean and dispersion
s = [gamma_draw(shape,1.0/scale) for i in range(0,1000)]
s2 = np.random.gamma(shape, scale, 1000)
count, bins, ignored = plt.hist(s, 50, normed=True)
count, bins, ignored = plt.hist(s2, 50, normed=True)
y = bins**(shape-1)*(np.exp(-bins/scale) /
(sps.gamma(shape)*scale**shape))
plt.plot(bins, y, linewidth=2, color='r')
plt.show()
''' | apache-2.0 |
vlas-sokolov/pyspeckit | pyspeckit/spectrum/widgets.py | 2 | 15600 | from __future__ import print_function
from astropy.extern.six.moves import xrange
from matplotlib.widgets import Widget,Button,Slider
import matplotlib
import warnings
class dictlist(list):
def __init__(self, *args):
list.__init__(self, *args)
self._dict = {}
self._dict_index = {}
for ii,value in enumerate(self):
if len(value) == 2:
self._dict[value[0]] = value[1]
self._dict_index[value[0]] = ii
self._dict_index[ii] = value[0]
else:
self._dict[ii] = value
self._dict_index[ii] = ii
def __getitem__(self, key):
if type(key) is int:
return super(dictlist,self).__getitem__(key)
else:
return self._dict[key]
def __setitem__(self, key, value):
if type(key) is int:
super(dictlist,self).__setitem__(key,value)
self._dict[self._dict_index[key]] = value
else:
if key in self._dict:
self._dict[key] = value
self[self._dict_index[key]] = value
else:
self._dict[key] = value
self._dict_index[key] = len(self)
self._dict_index[len(self)] = key
self.append(value)
    def __slice__(self, s1, s2):
        # note: __slice__ is not a special method Python calls; left as a stub,
        # slice support would have to be handled in __getitem__
        pass
def values(self):
return [self._dict[self._dict_index[ii]] for ii in xrange(len(self))]
def keys(self):
return [self._dict_index[ii] for ii in xrange(len(self))]
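# A small usage sketch for dictlist, which allows items to be addressed both by
# position and by name once a string key has been assigned:
#
#   dl = dictlist()
#   dl['amp'] = 1.0      # stored under the name and appended to the list
#   dl['width'] = 2.5
#   dl['amp']            # -> 1.0
#   dl[1]                # -> 2.5 (positional access still works)
#   dl.keys()            # -> ['amp', 'width']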
class ModifiableSlider(Slider):
def set_valmin(self, valmin):
"""
Change the minimum value of the slider
"""
self.valmin = valmin
self.ax.set_xlim((self.valmin,self.valmax))
if self.val < self.valmin:
self.set_val(self.valmin)
if self.valinit < self.valmin:
self.valinit = (self.valmax-self.valmin)/2. + self.valmin
if self.vline in self.ax.lines:
self.ax.lines.remove(self.vline)
self.vline = self.ax.axvline(self.valinit,0,1, color='r', lw=1)
def set_valmax(self, valmax):
"""
Change the maximum value of the slider
"""
self.valmax = valmax
self.ax.set_xlim((self.valmin,self.valmax))
if self.val > self.valmax:
self.set_val(self.valmax)
if self.valinit > self.valmax:
self.valinit = (self.valmax-self.valmin)/2. + self.valmin
if self.vline in self.ax.lines:
self.ax.lines.remove(self.vline)
self.vline = self.ax.axvline(self.valinit,0,1, color='r', lw=1)
class FitterSliders(Widget):
"""
A tool to adjust to subplot params of a :class:`matplotlib.figure.Figure`
"""
def __init__(self, specfit, targetfig, npars=1, toolfig=None, parlimitdict={}):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
None, a default figure will be created. If you are using
this from the GUI
"""
self.targetfig = targetfig
self.specfit = specfit
self.parlimitdict = parlimitdict
from matplotlib import pyplot
if toolfig is None:
tbar = matplotlib.rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
matplotlib.rcParams['toolbar'] = 'None'
self.toolfig = pyplot.figure(figsize=(6,3))
if hasattr(targetfig.canvas.manager,'window'):
if hasattr(targetfig.canvas.manager.window, 'title'):
self.toolfig.canvas.set_window_title("Fit Sliders for "+targetfig.canvas.manager.window.title())
elif hasattr(targetfig.canvas.manager.window, 'windowTitle'):
self.toolfig.canvas.set_window_title("Fit Sliders for "+targetfig.canvas.manager.window.windowTitle())
else:
warnings.warn("Only Qt4 and TkAgg support window titles (apparently)")
self.toolfig.subplots_adjust(top=0.9,left=0.2,right=0.9)
matplotlib.rcParams['toolbar'] = tbar
else:
self.toolfig = toolfig
self.toolfig.subplots_adjust(left=0.2, right=0.9)
bax = self.toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
self.set_sliders(parlimitdict)
def reset(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in self.sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in self.sliders:
slider.reset()
# reset drawon
for slider, b in zip(self.sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = self.toolfig.subplotpars.validate
self.toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(reset)
self.toolfig.subplotpars.validate = validate
def clear_sliders(self):
"""
Get rid of the sliders...
"""
try:
for sl in self.sliders:
sl.ax.remove()
except NotImplementedError:
for sl in self.sliders:
self.specfit.Spectrum.plotter.figure.delaxes(sl.ax)
self.specfit.Spectrum.plotter.refresh()
def set_sliders(self, parlimitdict={}):
"""
Set the slider properties, actions, and values
can also reset their limits
"""
def update(value):
mpp = [slider.val for slider in self.sliders]
for line in self.specfit.modelplot:
line.set_ydata(self.specfit.get_model_frompars(line.get_xdata(),mpp))
# update components too
for ii,line in enumerate(self.specfit._plotted_components):
xdata = line.get_xdata()
modelcomponents = self.specfit.fitter.components(xdata,
mpp,
**self.specfit._component_kwargs)
for jj,data in enumerate(modelcomponents):
if ii % 2 == jj:
# can have multidimensional components
if len(data.shape) > 1:
for d in (data):
line.set_ydata(d)
else:
line.set_ydata(data)
self.specfit.Spectrum.plotter.refresh()
self.sliders = dictlist()
npars = len(self.specfit.parinfo)
for param in self.specfit.parinfo:
name = param['parname']
value = param['value']
limited = param['limited']
limits = param['limits']
# make one less subplot so that there's room for buttons
# param['n'] is zero-indexed, subplots are 1-indexed
ax = self.toolfig.add_subplot(npars+1,1,param['n']+1)
ax.set_navigate(False)
if name in parlimitdict:
limits = parlimitdict[name]
limited = [True,True]
if limited[0]:
vmin = limits[0]
elif value != 0:
vmin = min([value/4.0,value*4.0])
else:
vmin = -1
if limited[1]:
vmax = limits[1]
elif value != 0:
vmax = max([value/4.0,value*4.0])
else:
vmax = 1
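            # e.g. an unconstrained parameter with value 2.0 gets a slider
            # range of (min(0.5, 8.0), max(0.5, 8.0)) = (0.5, 8.0)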
try:
self.sliders[name] = ModifiableSlider(ax,
name, vmin, vmax, valinit=value)
except ValueError:
self.sliders[name] = ModifiableSlider(ax,
name, vmin.value, vmax.value, valinit=value)
self.sliders[-1].on_changed(update)
def get_values(self):
return [s.val for s in self.sliders]
class FitterTools(Widget):
"""
A tool to monitor and play with :class:`pyspeckit.spectrum.fitter` properties
--------------------------
| Baseline range [x,x] |
| Baseline order - |
| (Baseline subtracted) |
| |
| Fitter range [x,x] |
| Fitter type ------- |
| Fitter Guesses [p,w] |
| ... ... |
| ... ... |
| |
| (Fit) (BL fit) (reset) |
--------------------------
"""
def __init__(self, specfit, targetfig, toolfig=None, nsubplots=12):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
None, a default figure will be created. If you are using
this from the GUI
"""
self.targetfig = targetfig
self.specfit = specfit
self.baseline = specfit.Spectrum.baseline
self.plotter = specfit.Spectrum.plotter
from matplotlib import pyplot
if toolfig is None:
tbar = matplotlib.rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
matplotlib.rcParams['toolbar'] = 'None'
self.toolfig = pyplot.figure(figsize=(6,3))
self.toolfig.canvas.set_window_title("Fit Tools for "+targetfig.canvas.manager.window.title())
self.toolfig.subplots_adjust(top=0.9,left=0.05,right=0.95)
matplotlib.rcParams['toolbar'] = tbar
else:
self.toolfig = toolfig
self.toolfig.subplots_adjust(left=0.0, right=1.0)
#bax = self.toolfig.add_axes([0.6, 0.05, 0.15, 0.075])
#self.buttonrefresh = Button(bax, 'Refresh')
# buttons ruin everything.
# fax = self.toolfig.add_axes([0.1, 0.05, 0.15, 0.075])
# self.buttonfit = Button(fax, 'Fit')
#
# resetax = self.toolfig.add_axes([0.7, 0.05, 0.15, 0.075])
# self.buttonreset = Button(resetax, 'Reset')
# resetblax = self.toolfig.add_axes([0.3, 0.05, 0.15, 0.075])
# self.buttonresetbl = Button(resetblax, 'Reset BL')
# resetfitax = self.toolfig.add_axes([0.5, 0.05, 0.15, 0.075])
# self.buttonresetfit = Button(resetfitax, 'Reset fit')
def refresh(event):
thisdrawon = self.drawon
self.drawon = False
self.update_information()
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
self.targetfig.canvas.draw()
def fit(event):
self.specfit.button3action(event)
def reset_fit(event):
self.specfit.guesses = []
self.specfit.npeaks = 0
self.specfit.includemask[:] = True
self.refresh(event)
def reset_baseline(event):
self.baseline.unsubtract()
self.refresh(event)
def reset(event):
reset_baseline(event)
reset_fit(event)
self.plotter()
self.refresh(event)
# during refresh there can be a temporary invalid state
# depending on the order of the refresh so we turn off
        # validation for the refreshing
#validate = self.toolfig.subplotpars.validate
#self.toolfig.subplotpars.validate = False
#self.buttonrefresh.on_clicked(refresh)
#self.toolfig.subplotpars.validate = validate
# these break everything.
# self.buttonfit.on_clicked(fit)
# self.buttonresetfit.on_clicked(reset_fit)
# self.buttonresetbl.on_clicked(reset_baseline)
# self.buttonreset.on_clicked(reset)
#menuitems = []
#for label in ('polynomial','blackbody','log-poly'):
# def on_select(item):
# print 'you selected', item.labelstr
# item = MenuItem(fig, label, props=props, hoverprops=hoverprops,
# on_select=on_select)
# menuitems.append(item)
#menu = Menu(fig, menuitems)
self.axes = [self.toolfig.add_subplot(nsubplots,1,spnum, frame_on=False, navigate=False, xticks=[], yticks=[])
for spnum in xrange(1,nsubplots+1)]
#self.axes = self.toolfig.add_axes([0,0,1,1])
self.use_axes = [0,1,2,4,5,6,7,8,9,10,11]
self.labels = dict([(axnum,None) for axnum in self.use_axes])
self.update_information()
self.targetfig.canvas.mpl_connect('button_press_event',self.refresh)
self.targetfig.canvas.mpl_connect('key_press_event',self.refresh)
self.targetfig.canvas.mpl_connect('draw_event',self.refresh)
def refresh(self, event):
try:
thisdrawon = self.drawon
self.drawon = False
self.update_information()
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
self.toolfig.canvas.draw()
except:
# ALWAYS fail silently
# this is TERRIBLE coding practice, but I have no idea how to tell the object to disconnect
# when the figure is closed
pass
def update_information(self, **kwargs):
self.information = [
("Baseline Range","(%g,%g)" % (self.baseline.xmin,self.baseline.xmax)),
("Baseline Order","%i" % (self.baseline.order)),
("Baseline Subtracted?","%s" % (self.baseline.subtracted)),
("Fitter Range","(%g,%g)" % (self.specfit.xmin,self.specfit.xmax)),
("Fitter Type","%s" % (self.specfit.fittype)),
]
for ii in xrange(self.specfit.npeaks):
guesses = tuple(self.specfit.guesses[ii:ii+3])
if len(guesses) == 3:
self.information += [("Fitter guesses%i:" % ii , "p: %g c: %g w: %g" % guesses) ]
else:
break
self.show_labels(**kwargs)
def show_selected_region(self):
self.specfit.highlight_fitregion()
def show_label(self, axis, text, xloc=0.0, yloc=0.5, **kwargs):
return axis.text(xloc, yloc, text, **kwargs)
def show_value(self, axis, text, xloc=0.5, yloc=0.5, **kwargs):
return axis.text(xloc, yloc, text, **kwargs)
def show_labels(self, **kwargs):
for axnum,(label,text) in zip(self.use_axes, self.information):
if self.labels[axnum] is not None and len(self.labels[axnum]) == 2:
labelobject,textobject = self.labels[axnum]
labelobject.set_label(label)
textobject.set_text(text)
else:
self.labels[axnum] = (self.show_label(self.axes[axnum],label),
self.show_value(self.axes[axnum],text))
def update_info_texts(self):
for newtext,textobject in zip(self.information.values(), self.info_texts):
textobject.set_text(newtext)
#import parinfo
#
#class ParameterButton(parinfo.Parinfo):
# """
# A class to manipulate individual parameter values
# """
# def __init__(self,
| mit |
jjunell/paparazzi | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit convertions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig == None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
    if legend is not None:
        ax.legend(legend, loc='best')
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
if t_min == None: t_min = packets[0][0]
if t_max == None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
                try:
                    i = msg_names.index(m.name)
                except ValueError:
                    continue  # message not in msg_names; skip it
                ret[i]['time'].append(t)
                ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
| gpl-2.0 |
xapharius/mrEnsemble | Engine/src/examples/simon_covnet.py | 2 | 2474 | '''
Created on Jul 31, 2015
@author: xapharius
'''
import numpy as np
import utils.imageutils as imgutils
import utils.numpyutils as nputils
from algorithms.neuralnetwork.convolutional.conv_net import ConvNet
from datahandler.image2.image_data_handler import ImageDataHandler
from factory.algorithm_factory import AlgorithmFactory
from datahandler.numerical.NumericalDataSet import NumericalDataSet
from factory.homogenous_factory import HomogenousFactory
import utils.serialization as srlztn
import matplotlib.pyplot as plt
from simulation.benchmarker.model_benchmarker import ModelBenchmarker
from simulation.sampler.bootstrap_sampler import BootstrapSampler
import simulation.benchmarker.dataset_loader as dloader
from validator.classification_validator import ClassificationValidator
rawdataset = dloader._get_wildfire("div")
bs = BootstrapSampler(0.01, with_replacement=False)
bs.bind_data(rawdataset.training_inputs, rawdataset.training_targets)
inp, lab = bs.sample()
print len(lab), lab.sum()
"""
rawdataset = dloader._get_binary_mnist()
inp = rawdataset.training_inputs
lab = rawdataset.training_targets
"""
# 28x28 -> C(5): 24x24 -> P(2): 12x12 -> C(5): 8x8 -> P(2): 4x4 -> C(4): 1x1
#topo = [[('c', 5, 8), ('p', 2), ('c', 5, 16), ('p', 2), ('c', 4, 16), ('mlp', 16, 16, 1)]]
"""
# 512x -> C(101): 412x -> P(4): 103x -> C(44): 60x -> P(2) -> 30 -> C(30)
topo = [[('c', 101, 16), ('p', 4), ('c', 44, 8), ('p', 2), ('c', 30, 8), ('mlp', 8, 8, 1)]]
# 256x -> C(57): 200x -> P(4): 50x -> C(21): 30x -> P(2) -> 15 -> C(15)
topo = [[('c', 57, 16), ('p', 4), ('c', 21, 8), ('p', 2), ('c', 15, 8), ('mlp', 8, 8, 1)]]
# 128x -> C(29): 100x -> P(2): 50x -> C(11): 40x -> P(2) -> 20 -> C(20)
topo = [[('c', 29, 16), ('p', 2), ('c', 11, 8), ('p', 2), ('c', 20, 8), ('mlp', 8, 8, 1)]]
# 64x -> C(35): 30x -> P(2): 15x -> C(6): 10x -> P(2) -> 5 -> C(5)
"""
topo = [[('c', 57, 16), ('p', 10), ('c', 20, 16), ('mlp', 16, 16, 1)]]
params = {'iterations':10, 'learning_rate':0.01, 'topo':topo}
algf = AlgorithmFactory(ConvNet, algorithm_params=params)
datahandler = ImageDataHandler()
factory = HomogenousFactory(datahandler, algf)
net = factory.get_instance()
net.train(inp, lab)
#net.train(rawdataset.training_inputs, rawdataset.training_targets)
validator = ClassificationValidator()
results = validator.validate(net, rawdataset.validation_inputs, rawdataset.validation_targets)
print results
plt.plot(net.model.train_acc_err)
plt.plot(net.model.val_acc_err)
plt.show()
| mit |
ericmjl/flu-sequence-predictor | utils/webplots.py | 1 | 7849 | from collections import defaultdict
from datetime import datetime
import pandas as pd
import yaml
from bokeh.embed import components
from bokeh.layouts import row
from bokeh.models import (
ColumnDataSource,
CrosshairTool,
HoverTool,
PanTool,
Range1d,
ResetTool,
SaveTool,
)
from bokeh.palettes import inferno
from bokeh.plotting import figure
from scipy.spatial import ConvexHull
from utils.data import load_prediction_coordinates, load_sequence_and_metadata
def make_vaccine_effectiveness_plot():
"""
This makes the plot that introduces vaccine effectiveness.
"""
# Download and preprocess data.
starttime = datetime.now()
cdc_tables = pd.read_html(
"https://www.cdc.gov/flu/vaccines-work/past-seasons-estimates.html"
) # noqa
cdc_ve = cdc_tables[0]
cdc_ve.columns = cdc_ve.loc[0, :]
cdc_ve = cdc_ve.drop(0).reset_index(drop=True)
cdc_ve.columns = [
"season",
"reference",
"study_sites",
"num_patients",
"overall_ve",
"CI",
]
cdc_ve["season_start"] = (
cdc_ve["season"].str.split("-").str[0].apply(lambda x: str(x))
)
# Configure Bokeh Plot
cdc_src = ColumnDataSource(cdc_ve)
hover_tool = HoverTool()
hover_tool.tooltips = [
("Year", "@season_start"),
("Effectiveness (%)", "@overall_ve"),
]
tools = [PanTool(), CrosshairTool(), hover_tool, ResetTool(), SaveTool()]
# Make Bokeh Plot
p = figure(
title="Yearly Vaccine Effectiveness",
plot_height=300,
plot_width=350,
tools=tools,
)
p.xaxis.axis_label = "Year"
p.yaxis.axis_label = "Vaccine Effectiveness (%)"
p.y_range = Range1d(0, 100)
p.line(x="season_start", y="overall_ve", source=cdc_src, line_width=2)
p.circle(
x="season_start",
y="overall_ve",
source=cdc_src,
radius=5,
radius_units="screen",
)
endtime = datetime.now()
elapsed = endtime - starttime
print(f"make_vaccine_effectiveness_plot() took {elapsed} seconds")
return components(p)
def make_num_sequences_per_year_plot():
starttime = datetime.now()
# Download and Preprocess Data
sequences, metadata = load_sequence_and_metadata()
metadata["Year"] = metadata["Collection Date"].apply(lambda x: x.year)
metadata = metadata[metadata["Host Species"] == "IRD:Human"]
gb = metadata.groupby("Year").count().reset_index()
# Configure Bokeh Plot
seqperyear_src = ColumnDataSource(gb)
hover_tool = HoverTool()
hover_tool.tooltips = [("Year", "@Year"), ("Num. Sequences", "@Name")]
tools = [PanTool(), CrosshairTool(), hover_tool, ResetTool(), SaveTool()]
# Make figure
p = figure(
plot_height=300,
plot_width=350,
tools=tools,
title="Num. Sequences Per Year",
)
p.line(x="Year", y="Name", source=seqperyear_src, line_width=2)
p.circle(
x="Year",
y="Name",
source=seqperyear_src,
radius=5,
radius_units="screen",
)
p.xaxis.axis_label = "Year"
p.yaxis.axis_label = "Number of Sequences"
# Collate metadata dictionary.
meta = dict()
meta["n_seqs"] = len(metadata)
meta["min_year"] = min(metadata["Year"])
meta["max_year"] = max(metadata["Year"])
endtime = datetime.now()
elapsed = endtime - starttime
print(f"make_num_sequences_per_year_plot() took {elapsed} seconds.")
return components(p), meta
def make_coordinate_scatterplot(coords, src, predcoords, vacc_src):
"""
This makes one embedding coordinate scatter plot.
"""
starttime = datetime.now()
cx, cy = coords
assert cx != cy
p = figure(
# webgl=True,
tools="pan,box_select,wheel_zoom,reset,save",
plot_width=300,
plot_height=250,
)
# Plot the "average coordinates per quarter.".
p.scatter(
x="coords{0}".format(cx),
y="coords{0}".format(cy),
source=src,
color="palette",
size=10,
line_color="black",
line_width=2,
name="avg",
)
# Plot the vaccine strains.
p.square(
x="coords{0}".format(cx),
y="coords{0}".format(cy),
color="blue",
line_color="black",
line_width=2,
name="vacc",
size=10,
source=vacc_src,
)
# Add the hover tool for only the vaccine plot (name="vacc")
hover_vacc = HoverTool(names=["vacc"])
hover_vacc.tooltips = [("Vaccine, Years Deployed", "@years_deployed")]
p.add_tools(hover_vacc)
# Add the hover tool for just the "average" sequences (name="avg")
hover_avg = HoverTool(names=["avg"])
hover_avg.tooltips = [("Average Sequence, Year", "@year")]
p.add_tools(hover_avg)
dim1 = "coords{0}".format(cx)
dim2 = "coords{0}".format(cy)
    # Plot bounding boxes for the forecasted sequences. Only those with greater
# than 2.5% probability (i.e. 25/1000) are shown.
xs_all = []
ys_all = []
colors = []
for (mpl_color, hex_color), dat in predcoords.groupby(
["matplotlib_colors", "hexdecimal_colors"]
):
d = dat[[dim1, dim2]]
if len(d) >= 25: # 25 = 2.5% of 1000.
xs = []
ys = []
hull = ConvexHull(d[[dim1, dim2]])
for v in hull.vertices:
xs.append(d.iloc[v][dim1])
ys.append(d.iloc[v][dim2])
xs.append(xs[0]) # re-append first data point so that line goes
ys.append(ys[0]) # back to the original point.
xs_all.append(xs)
ys_all.append(ys)
colors.append(hex_color)
p.multi_line(xs_all, ys_all, color=colors)
p.xaxis.axis_label = "Dimension {0}".format(cx + 1)
p.yaxis.axis_label = "Dimension {0}".format(cy + 1)
endtime = datetime.now()
elapsed = endtime - starttime
print(f"make_coordinate_scatterplot() took {elapsed} seconds.")
return p
def make_coord_plots():
"""
This makes all of the embedding coordinate scatter plots.
"""
# Read in the data.
data = pd.read_csv(
"https://raw.githubusercontent.com/ericmjl/flu-sequence-predictor/master/data/metadata_with_embeddings.csv", # noqa
index_col=0,
parse_dates=["Collection Date"],
)
data["year"] = data["Collection Date"].apply(lambda x: x.year)
data["Strain Name"] = data["Strain Name"].str.split("(").str[0]
# Filter out just vaccine strains.
with open("data/vaccine_strains.yaml", "r+") as f:
        vaccine_strains = yaml.safe_load(f)
    vacc_data = data[data["Strain Name"].isin(vaccine_strains.values())].copy()
    vacc_data.drop_duplicates(subset=["Strain Name"], inplace=True)
vacc_data["years_deployed"] = 0
vaccine_strains_by_name = defaultdict(list)
for year, strain in vaccine_strains.items():
vaccine_strains_by_name[strain].append(year)
vacc_data["years_deployed"] = vacc_data["Strain Name"].apply(
lambda x: vaccine_strains_by_name[x]
)
vacc_src = ColumnDataSource(vacc_data)
# Resample data to quarterly data.
data = data.set_index("Collection Date").resample("Q").mean()
palette = inferno(len(data))
data["palette"] = palette
src = ColumnDataSource(data)
predcoords = load_prediction_coordinates()
# Make the coordinate scatter plots
p1 = make_coordinate_scatterplot([0, 1], src, predcoords, vacc_src)
p2 = make_coordinate_scatterplot([1, 2], src, predcoords, vacc_src)
p2.x_range = p1.y_range
p3 = make_coordinate_scatterplot([0, 2], src, predcoords, vacc_src)
p3.x_range = p1.x_range
p3.y_range = p2.y_range
# Create the plot layout - using rows.
r1 = row([p1, p2, p3])
evo_script, evo_div = components(r1)
return evo_script, evo_div
| bsd-3-clause |
rrohan/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
jdfoote/MediaWiki-Networks | networkTools.py | 1 | 19949 | import re
import csv
import datetime
import igraph
import sys
from statistics import mean, median
from collections import namedtuple
import pandas as pd
import config
############ Goals: ###################
# High-level Goal
# - Create network objects from edit data
#
# Network Types
# - Co-editing (undirected) - A and B edit the same non-talk page within N
# edits/editors/seconds of each other => increment_edge(A,B)
# - Collaboration (undirected) - network where edit_pattern = (A,B,C,...,A) =>
# for e in edit_pattern: increment_edge(A, e) if A != e
# - Talk (directed) - A and B edit the same talk page within N
# edits/editors/seconds of each other OR
# A edit's B's User_talk page => increment_edge(A,B)
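# Illustrative example: with no limits, the edit sequence (A, B, C, A) on one
# non-talk page yields co-editing edges A-B, A-C and B-C; because A returns
# after B and C have edited, the collaboration rule yields edges A-B and A-C.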
class Edits:
def __init__(self,
fn,
remove_anon = False,
threshold = None,
cutoff_date = None # Ignore edits after this date
):
self.fn = fn
self.threshold = threshold
self.remove_anon = remove_anon
self.cutoff_date = cutoff_date
# Use the non-filtered version to find the last period of mutli-editor activity
def clean_df(self):
try:
self.df = pd.read_csv(self.fn, delimiter='\t', doublequote=False,
dtype={'reverteds':object})
except ValueError:
print("No lines in", fn)
self.df = None
except:
print("Error was:", sys.exc_info()[0])
raise
# Mark reverted edits (want to include bot reverts since these could be spam)
self.mark_reverted_revs()
# Just making it easier to refer to self.df
d = self.df
# Find the automated edits
bots = d['editor'].apply(self.is_bot)
# Store how many there were
self.bot_edit_count = len(bots)
# Remove the automated edits
d = d[~bots]
# Find the duplicate edits
dup_edits = d.apply(lambda x: (x['editor'], x['sha1']) in config.bad_sha_list, axis=1)
self.dup_edit_count = sum(dup_edits)
d = d[~dup_edits]
# Clean out any odd dates
# Start by removing obvious errors (since these can break pd.to_datetime)
good_dates = d['date_time'].str.startswith('2')
self.bad_date_count = len(d) - sum(good_dates)
d = d[good_dates]
# Then convert to datetime
d['date_time'] = pd.to_datetime(d['date_time'], errors="raise")
# Then remove rows with other suspicious dates
good_dates = (d['date_time'] > '2004-01-01') & (d['date_time'] < '2010-04-10')
self.bad_date_count += len(d) - sum(good_dates)
d = d[good_dates]
if self.cutoff_date != None:
d = d[d['date_time'] < self.cutoff_date] # Pretend like data collection happened at cutoff_date
d = d.sort_values('date_time')
# Anons aren't always marked correctly, so recalculate this based on whether the
# user name is an IP address
d['anon'] = d.editor.apply(is_anon)
self.df = d
return None
def threshold_filter(self, filter_func = lambda x: True):
d = self.df
# Anons aren't always marked correctly, so recalculate this based on whether the
# user name is an IP address
d['anon'] = d.editor.apply(is_anon)
if self.remove_anon:
d = d[d['anon']==False]
if self.threshold == None:
self.df = d.copy()
return None
num_edits = len(d[d.apply(filter_func, axis=1)])
# Figure out if there are enough edits to meet our criteria
if num_edits >= self.threshold:
# Only grab the edits that occur before the threshold
# (We do this by sorting by date and then summing the main ns edits and
            # removing any edits whose date is greater than the Nth main ns edit)
to_include = d.apply(filter_func, axis=1).cumsum()
filtered_d = d[to_include <= self.threshold].copy()
# Get the last active dates before we discard the newer edits
self.final_edit = d['date_time'].iloc[-1]
self.last_activity = self.get_last_active(n_days=30, n_editors = 2, min_date = filtered_d['date_time'].iloc[-1])
d = filtered_d
# Add quality score for each edit
d['quality'] = d.apply(lambda x: quality_score(x), axis = 1)
self.df = d
else:
self.df = None
def mark_reverted_revs(self):
# Create a list of all of the reverted ids
temp_list = list(self.df.loc[self.df.reverteds.notna(), 'reverteds'])
# Some of them are actually a list of ids, so we need to split them.
reverteds = []
for x in temp_list:
reverteds += x.split(',')
self.df['was_reverted'] = self.df.revid.isin(reverteds)
def is_bot(self, editor):
if editor in config.editor_ignore_list or config.bot_query.match(editor):
return True
else:
return False
def num_talk_edits(self):
return len(self.df[self.df['namespace'] % 2 ==1])
def edits_iterator(self):
temp_df = self.df.sort_values(['articleid','date_time'])
rows = temp_df.iterrows()
while rows:
yield next(rows)[1]
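    # Hypothetical usage sketch (file name and filter are placeholders):
    #   edits = Edits("enwiki_edits.tsv", remove_anon=True, threshold=500)
    #   edits.clean_df()
    #   edits.threshold_filter(lambda row: row['namespace'] == 0)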
class EditNetwork(igraph.Graph):
def __init__(self):
super().__init__(directed=True)
self.temp_edges = []
def median_weight(self):
return median(self.es['weight'])
def mean_weight(self):
return mean(self.es['weight'])
def make_network(self, edges):
if len(edges) == 0:
return None
nodes = set([e.from_node for e in edges] + [e.to_node for e in edges])
nodes = list(nodes)
self.add_vertices(nodes)
self.add_edges([(e.from_node, e.to_node) for e in edges])
self.es['weight'] = 1
# for each attribute, create a list of the values, and add it
# to the list of edges
for att in edges[0]._fields:
if att not in ['from_node', 'to_node']:
self.es[att] = [getattr(e, att) for e in edges]
# Collapsing edges; any filtering should happen before this step
self.collapse_weights()
def subgraph(self, vertices):
v_names = [x['name'] for x in self.vs()]
return self.induced_subgraph([v for v in vertices if v in v_names])
def get_edgelist_with_atts(self):
'''Writes out an edgelist, followed by edge attributes'''
output = []
# Get attribute names from first edge
attributes = sorted(self.es[0].attribute_names())
for e in self.es:
output.append([self.vs[e.source]['name'], #Name of the source node
self.vs[e.target]['name']] + # Name of the target node
[e.attributes()[x] for x in attributes]) # All of the attributes
return {'header':['from_node','to_node'] + attributes, 'data':output}
def collapse_weights(self):
# This will combine edges, summing the weights,
# adding the minimum time, and simplifying the attributes
def min_with_none(x):
filtered_list = [y for y in x if y is not None]
return min(filtered_list) if filtered_list else None
self.simplify(combine_edges={'weight':'sum',
'from_time':'min',
'from_anon': 'first',
'to_anon': 'first',
'timediff': min_with_none,
'intermediate_edits': min_with_none,
'intermediate_editors': min_with_none
})
def make_undirected(self):
'''Makes a graph undirected and sums the weights'''
self.to_undirected(combine_edges={'weight':'sum',
# There is a bug here - when making undirected, the from and to nodes can be
# switched, so anonymity is not preserved.
'from_anon':'first',
'to_anon':'first',
'from_time':'min'})
def dichotomize(self, threshhold = 1):
edges_to_keep = [e for e in self.es if e['weight'] >= threshhold]
temp = self.subgraph_edges(edges_to_keep)
#temp.es['weight'] = 1
return temp
def betweenness(self, vertices=None, normalized=True):
'''Takes a single vertex or list of vertices, and returns the betweenness from igraph.
        If normalized == True, then normalizes based on the constant used by igraph in R'''
def normalize_val(x):
            # This is the normalization used by igraph in R (http://igraph.org/r/doc/betweenness.html)
if x == 0:
return 0
else:
return x * 2 / (n*n - 3*n + 2)
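            # note that n*n - 3*n + 2 == (n - 1) * (n - 2), the number of
            # ordered pairs of other vertices the focal vertex can lie between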
#if sum(self.es['weight']) != len(self.es()):
#print('Converting to binary network for betweeness centrality')
#self.dichotomize()
non_normalized_betweenness = super(EditNetwork, self).betweenness(vertices=vertices)
n = self.vcount()
if normalized == True:
try:
# If it's just a float, then normalize and return
return normalize_val(non_normalized_betweenness)
except TypeError:
# Otherwise, normalize the whole list, and return
return [normalize_val(x) for x in non_normalized_betweenness]
else:
return non_normalized_betweenness
def hierarchy(self):
'''Returns the hierarchy measure created by Krackhardt(1994) for the graph.
        This is defined as the proportion of connected ordered pairs in the graph
        whose connection is not reciprocated. For a given path from v_i to v_j,
        the pair is counted as cyclical if there also exists a path from v_j to
        v_i, and as hierarchical otherwise.'''
if not self.is_directed():
raise ValueError("Hierarchy measure is only available on directed networks")
# Get the shortest paths (this tells us whether there is a path between any two nodes)
p = self.shortest_paths()
# Number of hierarchical paths (non-cycles)
h_paths = 0
# Number of cyclical paths
cycles = 0
for i in range(len(p)):
for j in range(len(p)):
# Check if a path exists between the nodes
if i != j and p[i][j] != float('inf'):
# If it does, and the reciprocal path also exist, increment the cycle count
if p[j][i] < float('inf'):
cycles += 1
else:
# Otherwise, increment the h_paths count
h_paths += 1
# Return the ratio of h_paths
if h_paths == cycles == 0:
return None
return h_paths / (h_paths + cycles)
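        # e.g. for a directed path A -> B -> C the reachable ordered pairs are
        # (A,B), (A,C) and (B,C); none is reciprocated, so hierarchy() is 3/3 = 1.0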
def effective_size(self, vertex):
ego_neighbors = self.neighbors(vertex)
neighbor_graph = self.induced_subgraph(ego_neighbors)
# Calculation of effective size, as described at http://www.analytictech.com/ucinet/help/hs4126.htm
# First, get the degree of all the neighbors
ng_degree = neighbor_graph.degree()
# Then average the degree, and subtract it from the number of neighbors
return len(ng_degree) - sum(ng_degree)/len(ng_degree)
def make_coedit_network(
# Function to use to filter namespaces. By default, it's all non-talk namespaces.
# To get just the main ns, use lambda x: x == 0
namespace_filter=lambda x: x % 2 == 0,
**kwargs): # Additional arguments to pass to make_network
    network = make_network(namespace_filter=namespace_filter, **kwargs)
return network
def make_talk_network(namespace_filter=lambda x: x % 2 == 1,
# Determines whether to include edges where User A writes on User B's
# talk page, whether or not User B writes back.
include_user_talk = True,
**kwargs):
network = make_network(namespace_filter = namespace_filter,
**kwargs)
if not include_user_talk:
network = network.remove_user_talk()
return network
def make_collaboration_network(namespace_filter=lambda x: x % 2 == 0,
**kwargs):
# TODO: Add filter to only collaborative edits
    network = make_network(namespace_filter=namespace_filter, **kwargs)
network = network.only_collaborative_edits()
return network
Edge = namedtuple('Edge', ['from_node',
'to_node',
'from_anon',
'to_anon',
'edit_type',
'timediff',
'intermediate_edits',
'intermediate_editors'])
Edge.__new__.__defaults__ = (None,) * len(Edge._fields)
def make_network(edits,
edit_limit=None,
editor_limit=None,
time_limit=None,
section_filter=False,
dichotomize_level=1,
namespace_filter = lambda x: True
):
'''
    Creates a network object based on co-edits on the same page. Takes an Edits object.
    Also takes a number of parameters that determine whether an edge should be created.
edit_limit will create edges with the contributors of each of the last N edits
(e.g., edit_limit = 1 means that only adjacent edits will result in edges).
editor_limit is similar, but will create edges with the last N editors, while
time_limit creates edges with all editors who have edited in the last N days.
By default, there are no limits, and edges are created/incremented with all
other contributors to the page.
'''
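    # Hypothetical usage sketch (the Edits instance `my_edits` is a placeholder):
    #   net = make_network(edits=my_edits, editor_limit=5, dichotomize_level=2,
    #                      namespace_filter=lambda ns: ns % 2 == 0)
    # links each editor to the previous five distinct editors of the same
    # non-talk page and keeps only edges observed at least twice.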
def edges_from_page_edits(page_edits):
'''Go through each edit to a page and figure out which
subsequent edits should have edges to this edit'''
if len(page_edits) == 0:
return []
edges = []
# If it's a talk page, figure out the owner
page_owner = get_talk_page_owner(page_edits[0])
for i, edit in enumerate(page_edits):
# Reset temp variables
curr_edges = []
curr_editors = []
curr_section = get_section_from_comment(edit) if section_filter else None
curr_time = edit['date_time']
intermediate_edits = 1
# If this is a talk page, then add edges to the owner of the page
if page_owner and page_owner != edit['editor']:
edges.append(make_user_talk_edge(edit, page_owner))
# Now loop through all subsequent edits
for j in range(i+1, len(page_edits)):
new_edit = page_edits[j]
# If the sections don't match, then pretend like this edit doesn't exist
if section_filter and get_section_from_comment(new_edit) != curr_section:
continue
                # If this edit is too late then break (since all future
                # edits will also be too late)
                new_time = new_edit['date_time']
                if time_limit and new_time - curr_time > time_limit:
                    break
# If they are the same person, then mark the previous edits as
# collaborative, and break the inner loop
# (since future edges will be captured once we get to this
# edit in the main loop)
if same_editor(edit,new_edit):
curr_edges = [e._replace(edit_type = 'collaborative') for e in curr_edges]
break
# Add this editor to the set of editors, if necessary
if new_edit['editor'] in curr_editors:
# One edit can't result in multiple
# edges to the same alter. E.g., if A edits the page
# and then B, C, B edit the page A will only have 1 tie with B.
# So, don't add the edge but increment the edit count
intermediate_edits += 1
continue
else:
curr_editors.append(new_edit['editor'])
# Create a new edge, and add it
curr_edges.append(Edge(
from_node = new_edit['editor'],
to_node = edit['editor'],
edit_type = 'normal',
from_anon = new_edit['anon'],
to_anon = edit['anon'],
timediff = new_time - curr_time,
intermediate_edits = intermediate_edits,
intermediate_editors = len(curr_editors),
))
intermediate_edits += 1
# Now check the other parameters and break if they are met
if (
# We incremented this so if it's larger then break
(edit_limit and intermediate_edits > edit_limit) or
# If the next editor is already in the list then we won't create an edge
# So as long as we've reached the limit now we are safe to break
(editor_limit and len(curr_editors) == editor_limit)
):
break
# At the end of the loop, add the edges
edges += curr_edges
return edges
def make_user_talk_edge(edit, page_owner):
return Edge(from_node = edit['editor'],
to_node = page_owner,
from_anon = edit['anon'],
to_anon = is_anon(page_owner),
edit_type = 'user_talk_owner'
)
'''The basic logic is that we identify all the edits on a single
page, then convert that page's edits to edges and move on to the
next page'''
time_limit = datetime.timedelta(days = time_limit) if time_limit else None
all_edges = []
curr_page = ''
curr_page_edits = []
edits = (x for x in edits.edits_iterator() if namespace_filter(x['namespace']))
for edit in edits:
if edit['articleid'] != curr_page:
all_edges += edges_from_page_edits(curr_page_edits)
curr_page_edits = [edit]
curr_page = edit['articleid']
else:
curr_page_edits.append(edit)
# Get the last pages edges
all_edges += edges_from_page_edits(curr_page_edits)
# Make the network
network = EditNetwork()
network.make_network(all_edges)
network = network.dichotomize(dichotomize_level)
if len(network.vs) == 0:
return None
return network
def make_timestamp(edit):
return datetime.datetime.strptime(edit['date_time'], '%Y-%m-%d %H:%M:%S')
def is_anon(username):
'''Check if a username is an ipv4 ip address. We use this as
a marker of whether the user is anonymous'''
if re.match('([0-9]{1,3}\.){3}[0-9]{1,3}',username):
return True
else:
return False
def same_editor(edit1, edit2):
return edit1['editor'] == edit2['editor']
def get_talk_page_owner(edit):
'''Checks a talk page to see if it's a user talk page (ASSUMES THAT
THESE ARE NAMESPACE 3). If it is a user talk
page, then returns the user name. Otherwise, returns None'''
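    # e.g. a namespace-3 title such as "User talk:Alice" yields "Alice"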
if edit['namespace'] == 3:
return re.match('[^:]+:(.*)$',edit['title']).group(1)
else:
return None
def get_section_from_comment(edit):
'''Finds the section an edit was made to, based on the comment.
ASSUMPTION:
The first edit to a section is formatted as "Section name [dd mon yyyy]".
Subsequent edits are "/* Section name \* Comment here".
If there is no section name, then return None.'''
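    # e.g. comment "/* External links */ added a source" -> "External links",
    # and comment "External links [12 Jan 2006]" -> "External links"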
try:
comment = edit['comment']
except KeyError:
return None
if comment:
a = re.match(r'\/\* (.*) \*\/.*', comment)
if a:
return a.group(1).rstrip()
b = re.match(r'(.*)\[[^]]*\]$', comment)
if b:
return b.group(1).rstrip()
return None
| gpl-2.0 |
OshynSong/scikit-learn | sklearn/neighbors/unsupervised.py | 117 | 4755 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`k_neighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
stevenzhang18/Indeed-Flask | lib/pandas/stats/moments.py | 9 | 39221 | """
Provides rolling statistical moments and related descriptive
statistics implemented in Cython
"""
from __future__ import division
from functools import wraps
from collections import defaultdict
from numpy import NaN
import numpy as np
from pandas.core.api import DataFrame, Series, Panel, notnull
import pandas.algos as algos
import pandas.core.common as pdcom
from pandas.util.decorators import Substitution, Appender
__all__ = ['rolling_count', 'rolling_max', 'rolling_min',
'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov',
'rolling_corr', 'rolling_var', 'rolling_skew', 'rolling_kurt',
'rolling_quantile', 'rolling_median', 'rolling_apply',
'rolling_corr_pairwise', 'rolling_window',
'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov',
'expanding_count', 'expanding_max', 'expanding_min',
'expanding_sum', 'expanding_mean', 'expanding_std',
'expanding_cov', 'expanding_corr', 'expanding_var',
'expanding_skew', 'expanding_kurt', 'expanding_quantile',
'expanding_median', 'expanding_apply', 'expanding_corr_pairwise']
#------------------------------------------------------------------------------
# Docs
# The order of arguments for the _doc_template is:
# (header, args, kwargs, returns, notes)
_doc_template = """
%s
Parameters
----------
%s%s
Returns
-------
%s
%s
"""
_roll_kw = """window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
how : string, default '%s'
Method for down- or re-sampling
"""
_roll_notes = r"""
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_ewm_kw = r"""com : float. optional
Center of mass: :math:`\alpha = 1 / (1 + com)`,
span : float, optional
Specify decay in terms of span, :math:`\alpha = 2 / (span + 1)`
halflife : float, optional
Specify decay in terms of halflife, :math:`\alpha = 1 - exp(log(0.5) / halflife)`
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account for
imbalance in relative weightings (viewing EWMA as a moving average)
how : string, default 'mean'
Method for down- or re-sampling
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
"""
_ewm_notes = r"""
Notes
-----
Either center of mass, span or halflife must be specified
EWMA is sometimes specified using a "span" parameter `s`, we have that the
decay parameter :math:`\alpha` is related to the span as
:math:`\alpha = 2 / (s + 1) = 1 / (1 + c)`
where `c` is the center of mass. Given a span, the associated center of mass is
:math:`c = (s - 1) / 2`
So a "20-day EWMA" would have center 9.5.
When adjust is True (default), weighted averages are calculated using weights
(1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
relative positions. For example, the weights of x and y used in calculating
the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions
"""
_expanding_kw = """min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
"""
_type_of_input_retval = "y : type of input argument"
_flex_retval = """y : type depends on inputs
DataFrame / DataFrame -> DataFrame (matches on columns) or Panel (pairwise)
DataFrame / Series -> Computes result for each column
Series / Series -> Series"""
_pairwise_retval = "y : Panel whose items are df1.index values"
_unary_arg = "arg : Series, DataFrame\n"
_binary_arg_flex = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray, optional
if not supplied then will default to arg1 and produce pairwise output
"""
_binary_arg = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray
"""
_pairwise_arg = """df1 : DataFrame
df2 : DataFrame
"""
_pairwise_kw = """pairwise : bool, default False
If False then only matching columns between arg1 and arg2 will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a Panel in the case of DataFrame inputs. In the case of missing
elements, only complete pairwise observations will be used.
"""
_ddof_kw = """ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
"""
_bias_kw = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
def rolling_count(arg, window, freq=None, center=False, how=None):
"""
Rolling count of number of non-NaN observations inside provided window.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
how : string, default 'mean'
Method for down- or re-sampling
Returns
-------
rolling_count : type of caller
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
arg = _conv_timerule(arg, freq, how)
if not center:
window = min(window, len(arg))
return_hook, values = _process_data_structure(arg, kill_inf=False)
converted = np.isfinite(values).astype(float)
result = rolling_sum(converted, window, min_periods=0,
center=center) # already converted
# putmask here?
result[np.isnan(result)] = 0
return return_hook(result)
@Substitution("Unbiased moving covariance.", _binary_arg_flex,
_roll_kw%'None'+_pairwise_kw+_ddof_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None,
center=False, pairwise=None, how=None, ddof=1):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
elif arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
def _get_cov(X, Y):
mean = lambda x: rolling_mean(x, window, min_periods, center=center)
count = rolling_count(X + Y, window, center=center)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
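        # i.e. the identity Cov(X, Y) = E[XY] - E[X] E[Y], rescaled by
        # count / (count - ddof) to undo the bias of the plug-in estimate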
rs = _flex_binary_moment(arg1, arg2, _get_cov, pairwise=bool(pairwise))
return rs
@Substitution("Moving sample correlation.", _binary_arg_flex,
_roll_kw%'None'+_pairwise_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None,
center=False, pairwise=None, how=None):
if window is None and isinstance(arg2, (int, float)):
window = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
elif arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise # only default unset
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
def _get_corr(a, b):
num = rolling_cov(a, b, window, min_periods, freq=freq,
center=center)
den = (rolling_std(a, window, min_periods, freq=freq,
center=center) *
rolling_std(b, window, min_periods, freq=freq,
center=center))
return num / den
return _flex_binary_moment(arg1, arg2, _get_corr, pairwise=bool(pairwise))
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
if not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and
isinstance(arg2,(np.ndarray, Series, DataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if isinstance(arg1, (np.ndarray, Series)) and \
isinstance(arg2, (np.ndarray,Series)):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, DataFrame):
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, DataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index, columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j<i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]))
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
return p
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
@Substitution("Deprecated. Use rolling_corr(..., pairwise=True) instead.\n\n"
"Pairwise moving sample correlation", _pairwise_arg,
_roll_kw%'None', _pairwise_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None,
freq=None, center=False):
import warnings
msg = "rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)"
warnings.warn(msg, FutureWarning, stacklevel=2)
return rolling_corr(df1, df2, window=window, min_periods=min_periods,
freq=freq, center=center,
pairwise=True)
def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,
how=None, args=(), kwargs={}, **kwds):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
func : Cython function to compute rolling statistic on raw series
minp : int
Minimum number of observations required to have a value
axis : int, default 0
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
center : boolean, default False
Whether the label should correspond with center of window
how : string, default 'mean'
Method for down- or re-sampling
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input
"""
arg = _conv_timerule(arg, freq, how)
return_hook, values = _process_data_structure(arg)
if values.size == 0:
result = values.copy()
else:
# actually calculate the moment. Faster way to do this?
offset = int((window - 1) / 2.) if center else 0
additional_nans = np.array([np.NaN] * offset)
calc = lambda x: func(np.concatenate((x, additional_nans)) if center else x,
window, minp=minp, args=args, kwargs=kwargs,
**kwds)
if values.ndim > 1:
result = np.apply_along_axis(calc, axis, values)
else:
result = calc(values)
if center:
result = _center_window(result, window, axis)
return return_hook(result)
def _center_window(rs, window, axis):
if axis > rs.ndim-1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = int((window - 1) / 2.)
if offset > 0:
if isinstance(rs, (Series, DataFrame, Panel)):
rs = rs.slice_shift(-offset, axis=axis)
else:
lead_indexer = [slice(None)] * rs.ndim
lead_indexer[axis] = slice(offset, None)
rs = np.copy(rs[tuple(lead_indexer)])
return rs
def _process_data_structure(arg, kill_inf=True):
if isinstance(arg, DataFrame):
return_hook = lambda v: type(arg)(v, index=arg.index,
columns=arg.columns)
values = arg.values
elif isinstance(arg, Series):
values = arg.values
return_hook = lambda v: Series(v, arg.index, name=arg.name)
else:
return_hook = lambda v: v
values = arg
if not issubclass(values.dtype.type, float):
values = values.astype(float)
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return return_hook, values
#------------------------------------------------------------------------------
# Exponential moving moments
def _get_center_of_mass(com, span, halflife):
valid_count = len([x for x in [com, span, halflife] if x is not None])
if valid_count > 1:
raise Exception("com, span, and halflife are mutually exclusive")
if span is not None:
# convert span to center of mass
com = (span - 1) / 2.
elif halflife is not None:
# convert halflife to center of mass
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif com is None:
raise Exception("Must pass one of com, span, or halflife")
return float(com)
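# The conversions above are the standard ones: a span s maps to a center of
# mass of (s - 1) / 2, and a halflife h maps to 1 / (1 - exp(ln(0.5) / h)) - 1.
# A minimal illustrative check follows; it is not part of the original module
# and the numbers are arbitrary.
def _center_of_mass_sketch():
    com_from_span = _get_center_of_mass(None, 10, None)     # (10 - 1) / 2 == 4.5
    com_from_halflife = _get_center_of_mass(None, None, 2)  # ~= 2.4142
    return com_from_span, com_from_halflife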
@Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw,
_type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None,
adjust=True, how=None, ignore_na=False):
arg = _conv_timerule(arg, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _ewma(v):
return algos.ewma(v, com, int(adjust), int(ignore_na), int(min_periods))
return_hook, values = _process_data_structure(arg)
if values.size == 0:
output = values.copy()
else:
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
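# Hedged usage sketch for ewma(); not part of the original module.  The
# Series values are made up purely for illustration.
def _ewma_usage_sketch():
    import pandas as pd
    s = pd.Series([1.0, 2.0, 4.0, 8.0, 16.0])
    # span=3 corresponds to a center of mass of (3 - 1) / 2 == 1.0
    return ewma(s, span=3)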
@Substitution("Exponentially-weighted moving variance", _unary_arg,
_ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
freq=None, how=None, ignore_na=False, adjust=True):
arg = _conv_timerule(arg, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _ewmvar(v):
return algos.ewmcov(v, v, com, int(adjust), int(ignore_na), int(min_periods), int(bias))
return_hook, values = _process_data_structure(arg)
if values.size == 0:
output = values.copy()
else:
output = np.apply_along_axis(_ewmvar, 0, values)
return return_hook(output)
@Substitution("Exponentially-weighted moving std", _unary_arg,
_ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
ignore_na=False, adjust=True):
result = ewmvar(arg, com=com, span=span, halflife=halflife,
min_periods=min_periods, bias=bias, adjust=adjust, ignore_na=ignore_na)
return _zsqrt(result)
ewmvol = ewmstd
@Substitution("Exponentially-weighted moving covariance", _binary_arg_flex,
_ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
bias=False, freq=None, pairwise=None, how=None, ignore_na=False, adjust=True):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _get_ewmcov(X, Y):
# X and Y have the same structure (and NaNs) when called from _flex_binary_moment()
return_hook, x_values = _process_data_structure(X)
return_hook, y_values = _process_data_structure(Y)
cov = algos.ewmcov(x_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), int(bias))
return return_hook(cov)
result = _flex_binary_moment(arg1, arg2, _get_ewmcov,
pairwise=bool(pairwise))
return result
@Substitution("Exponentially-weighted moving correlation", _binary_arg_flex,
_ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
freq=None, pairwise=None, how=None, ignore_na=False, adjust=True):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _get_ewmcorr(X, Y):
# X and Y have the same structure (and NaNs) when called from _flex_binary_moment()
return_hook, x_values = _process_data_structure(X)
return_hook, y_values = _process_data_structure(Y)
cov = algos.ewmcov(x_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
x_var = algos.ewmcov(x_values, x_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
y_var = algos.ewmcov(y_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
corr = cov / _zsqrt(x_var * y_var)
return return_hook(corr)
result = _flex_binary_moment(arg1, arg2, _get_ewmcorr,
pairwise=bool(pairwise))
return result
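# Hedged sketch of the pairwise path handled by _flex_binary_moment(); not
# part of the original module.  The DataFrame contents are arbitrary; the
# point is only that pairwise=True on DataFrame input returns a Panel holding
# the column-by-column correlation grid over time.
def _pairwise_ewmcorr_sketch():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame(np.random.randn(50, 3), columns=list('abc'))
    return ewmcorr(df, span=10, pairwise=True)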
def _zsqrt(x):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, DataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
#----------------------------------------------------------------------
# Python interface to Cython functions
def _conv_timerule(arg, freq, how):
types = (DataFrame, Series)
if freq is not None and isinstance(arg, types):
# Conform to whatever frequency needed.
arg = arg.resample(freq, how=how)
return arg
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _rolling_func(func, desc, check_minp=_use_window, how=None, additional_kw=''):
if how is None:
how_arg_str = 'None'
else:
        how_arg_str = "'%s'" % how
@Substitution(desc, _unary_arg, _roll_kw%how_arg_str + additional_kw,
_type_of_input_retval, _roll_notes)
@Appender(_doc_template)
@wraps(func)
def f(arg, window, min_periods=None, freq=None, center=False, how=how,
**kwargs):
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
center=center, how=how, **kwargs)
return f
rolling_max = _rolling_func(algos.roll_max, 'Moving maximum.', how='max')
rolling_min = _rolling_func(algos.roll_min, 'Moving minimum.', how='min')
rolling_sum = _rolling_func(algos.roll_sum, 'Moving sum.')
rolling_mean = _rolling_func(algos.roll_mean, 'Moving mean.')
rolling_median = _rolling_func(algos.roll_median_c, 'Moving median.',
how='median')
_ts_std = lambda *a, **kw: _zsqrt(algos.roll_var(*a, **kw))
rolling_std = _rolling_func(_ts_std, 'Moving standard deviation.',
check_minp=_require_min_periods(1),
additional_kw=_ddof_kw)
rolling_var = _rolling_func(algos.roll_var, 'Moving variance.',
check_minp=_require_min_periods(1),
additional_kw=_ddof_kw)
rolling_skew = _rolling_func(algos.roll_skew, 'Unbiased moving skewness.',
check_minp=_require_min_periods(3))
rolling_kurt = _rolling_func(algos.roll_kurt, 'Unbiased moving kurtosis.',
check_minp=_require_min_periods(4))
def rolling_quantile(arg, window, quantile, min_periods=None, freq=None,
center=False):
"""Moving quantile.
Parameters
----------
arg : Series, DataFrame
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
quantile : float
0 <= quantile <= 1
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
Returns
-------
y : type of input argument
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
def call_cython(arg, window, minp, args=(), kwargs={}):
minp = _use_window(minp, window)
return algos.roll_quantile(arg, window, minp, quantile)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
center=center)
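# Hedged usage sketch for rolling_quantile(); not part of the original
# module.  The input values are illustrative assumptions.
def _rolling_quantile_sketch():
    import pandas as pd
    s = pd.Series([3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0, 6.0])
    # rolling median (quantile=0.5) over a 3-observation window
    return rolling_quantile(s, window=3, quantile=0.5)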
def rolling_apply(arg, window, func, min_periods=None, freq=None,
center=False, args=(), kwargs={}):
"""Generic moving function application.
Parameters
----------
arg : Series, DataFrame
window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
func : function
Must produce a single value from an ndarray input
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input argument
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
offset = int((window - 1) / 2.) if center else 0
def call_cython(arg, window, minp, args, kwargs):
minp = _use_window(minp, window)
return algos.roll_generic(arg, window, minp, offset, func, args, kwargs)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
center=False, args=args, kwargs=kwargs)
def rolling_window(arg, window=None, win_type=None, min_periods=None,
freq=None, center=False, mean=True,
axis=0, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` and size ``window``
on the data.
Parameters
----------
arg : Series, DataFrame
window : int or ndarray
Weighting window specification. If the window is an integer, then it is
treated as the window length and win_type is required
win_type : str, default None
Window type (see Notes)
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Whether the label should correspond with center of window
mean : boolean, default True
If True computes weighted mean, else weighted sum
axis : {0, 1}, default 0
how : string, default 'mean'
Method for down- or re-sampling
Returns
-------
y : type of input argument
Notes
-----
The recognized window types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
if isinstance(window, (list, tuple, np.ndarray)):
if win_type is not None:
raise ValueError(('Do not specify window type if using custom '
'weights'))
window = pdcom._asarray_tuplesafe(window).astype(float)
elif pdcom.is_integer(window): # window size
if win_type is None:
raise ValueError('Must specify window type')
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window weight')
win_type = _validate_win_type(win_type, kwargs) # may pop from kwargs
window = sig.get_window(win_type, window).astype(float)
else:
raise ValueError('Invalid window %s' % str(window))
minp = _use_window(min_periods, len(window))
arg = _conv_timerule(arg, freq, how)
return_hook, values = _process_data_structure(arg)
if values.size == 0:
result = values.copy()
else:
offset = int((len(window) - 1) / 2.) if center else 0
additional_nans = np.array([np.NaN] * offset)
f = lambda x: algos.roll_window(np.concatenate((x, additional_nans)) if center else x,
window, minp, avg=mean)
result = np.apply_along_axis(f, axis, values)
if center:
result = _center_window(result, len(window), axis)
return return_hook(result)
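# Hedged usage sketch for rolling_window(); not part of the original module.
# The series length, window size and Gaussian std are assumptions chosen only
# to show how a win_type-specific parameter (``std``) is passed through
# _validate_win_type()/_pop_args().  Requires scipy for sig.get_window().
def _rolling_window_sketch():
    import numpy as np
    import pandas as pd
    s = pd.Series(np.random.randn(100))
    # weighted moving average using scipy-generated Gaussian weights
    return rolling_window(s, window=7, win_type='gaussian', std=2.0)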
def _validate_win_type(win_type, kwargs):
# may pop from kwargs
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] +
_pop_args(win_type, arg_map[win_type], kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
def _expanding_func(func, desc, check_minp=_use_window, additional_kw=''):
@Substitution(desc, _unary_arg, _expanding_kw + additional_kw,
_type_of_input_retval, "")
@Appender(_doc_template)
@wraps(func)
def f(arg, min_periods=1, freq=None, **kwargs):
window = max(len(arg), min_periods) if min_periods else len(arg)
def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
minp = check_minp(minp, window)
return func(arg, window, minp, **kwds)
return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
**kwargs)
return f
expanding_max = _expanding_func(algos.roll_max, 'Expanding maximum.')
expanding_min = _expanding_func(algos.roll_min, 'Expanding minimum.')
expanding_sum = _expanding_func(algos.roll_sum, 'Expanding sum.')
expanding_mean = _expanding_func(algos.roll_mean, 'Expanding mean.')
expanding_median = _expanding_func(algos.roll_median_c, 'Expanding median.')
expanding_std = _expanding_func(_ts_std, 'Expanding standard deviation.',
check_minp=_require_min_periods(1),
additional_kw=_ddof_kw)
expanding_var = _expanding_func(algos.roll_var, 'Expanding variance.',
check_minp=_require_min_periods(1),
additional_kw=_ddof_kw)
expanding_skew = _expanding_func(algos.roll_skew, 'Unbiased expanding skewness.',
check_minp=_require_min_periods(3))
expanding_kurt = _expanding_func(algos.roll_kurt, 'Unbiased expanding kurtosis.',
check_minp=_require_min_periods(4))
def expanding_count(arg, freq=None):
"""
Expanding count of number of non-NaN observations.
Parameters
----------
arg : DataFrame or numpy ndarray-like
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
Returns
-------
expanding_count : type of caller
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
return rolling_count(arg, len(arg), freq=freq)
def expanding_quantile(arg, quantile, min_periods=1, freq=None):
"""Expanding quantile.
Parameters
----------
arg : Series, DataFrame
quantile : float
0 <= quantile <= 1
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
Returns
-------
y : type of input argument
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
return rolling_quantile(arg, len(arg), quantile, min_periods=min_periods,
freq=freq)
@Substitution("Unbiased expanding covariance.", _binary_arg_flex,
_expanding_kw+_pairwise_kw+_ddof_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None, ddof=1):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and min_periods is None:
min_periods = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_cov(arg1, arg2, window,
min_periods=min_periods, freq=freq,
pairwise=pairwise, ddof=ddof)
@Substitution("Expanding sample correlation.", _binary_arg_flex,
_expanding_kw+_pairwise_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and min_periods is None:
min_periods = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_corr(arg1, arg2, window,
min_periods=min_periods,
freq=freq, pairwise=pairwise)
@Substitution("Deprecated. Use expanding_corr(..., pairwise=True) instead.\n\n"
"Pairwise expanding sample correlation", _pairwise_arg,
_expanding_kw, _pairwise_retval, "")
@Appender(_doc_template)
def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None):
import warnings
msg = "expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)"
warnings.warn(msg, FutureWarning, stacklevel=2)
return expanding_corr(df1, df2, min_periods=min_periods,
freq=freq, pairwise=True)
def expanding_apply(arg, func, min_periods=1, freq=None,
args=(), kwargs={}):
"""Generic expanding function application.
Parameters
----------
arg : Series, DataFrame
func : function
Must produce a single value from an ndarray input
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
args : tuple
Passed on to func
kwargs : dict
Passed on to func
Returns
-------
y : type of input argument
Notes
-----
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
window = max(len(arg), min_periods) if min_periods else len(arg)
return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq,
args=args, kwargs=kwargs)
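# Hedged usage sketch for expanding_apply(); not part of the original module.
# The data and the reducing function are illustrative assumptions.
def _expanding_apply_sketch():
    import pandas as pd
    s = pd.Series([1.0, 3.0, 2.0, 5.0, 4.0])
    # func must reduce an ndarray to a single value; here: the running range
    return expanding_apply(s, lambda x: x.max() - x.min(), min_periods=1)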
| apache-2.0 |
sisnkemp/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
        plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
plt.show()
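# Hedged usage sketch for hist_dist(); not part of the original helper.  It
# assumes the TensorFlow 1.x API already used in this file (tf.Session);
# the tensor shape and stddev are arbitrary.
def _hist_dist_sketch():
    dist = tf.truncated_normal([1000], stddev=0.1)
    hist_dist('Truncated normal, stddev=0.1', dist, hist_range=(-0.5, 0.5))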
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
vortex-ape/scikit-learn | sklearn/manifold/isomap.py | 14 | 8028 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import Isomap
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = Isomap(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto', n_jobs=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
X = check_array(X, accept_sparse='csr')
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter,
n_jobs=self.n_jobs)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance', n_jobs=self.n_jobs)
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
y : Ignored
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min(self.dist_matrix_[indices[i]] +
distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
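# Hedged usage sketch; not part of the original module.  It mirrors the
# digits example from the class docstring and additionally calls
# reconstruction_error(), whose cost function is documented above.
def _isomap_usage_sketch():
    from sklearn.datasets import load_digits
    X, _ = load_digits(return_X_y=True)
    embedding = Isomap(n_neighbors=5, n_components=2)
    X_2d = embedding.fit_transform(X[:100])
    return X_2d.shape, embedding.reconstruction_error()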
| bsd-3-clause |
DistrictDataLabs/yellowbrick | tests/test_model_selection/test_validation_curve.py | 1 | 6121 | # tests.test_model_selection.test_validation_curve
# Tests for the ValidationCurve visualizer
#
# Author: Benjamin Bengfort
# Created: Sat Mar 31 06:25:05 2018 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_validation_curve.py [c5355ee] [email protected] $
"""
Tests for the ValidationCurve visualizer
"""
##########################################################################
# Imports
##########################################################################
import sys
import pytest
import numpy as np
from unittest.mock import patch
from tests.base import VisualTestCase
from sklearn.svm import SVC
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import OneHotEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import ShuffleSplit, StratifiedKFold
from yellowbrick.datasets import load_mushroom
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.model_selection.validation_curve import *
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
# Test Cases
##########################################################################
@pytest.mark.usefixtures("classification", "regression", "clusters")
class TestValidationCurve(VisualTestCase):
"""
Test the ValidationCurve visualizer
"""
@patch.object(ValidationCurve, "draw")
def test_fit(self, mock_draw):
"""
Assert that fit returns self and creates expected properties
"""
X, y = self.classification
params = (
"train_scores_",
"train_scores_mean_",
"train_scores_std_",
"test_scores_",
"test_scores_mean_",
"test_scores_std_",
)
oz = ValidationCurve(
SVC(), param_name="gamma", param_range=np.logspace(-6, -1, 5)
)
for param in params:
assert not hasattr(oz, param)
assert oz.fit(X, y) is oz
mock_draw.assert_called_once()
for param in params:
assert hasattr(oz, param)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_classifier(self):
"""
Test image closeness on a classification dataset with kNN
"""
X, y = self.classification
cv = ShuffleSplit(3, random_state=288)
param_range = np.arange(3, 10)
oz = ValidationCurve(
KNeighborsClassifier(),
param_name="n_neighbors",
param_range=param_range,
cv=cv,
scoring="f1_weighted",
)
oz.fit(X, y)
oz.finalize()
self.assert_images_similar(oz)
def test_regression(self):
"""
Test image closeness on a regression dataset with a DecisionTree
"""
X, y = self.regression
cv = ShuffleSplit(3, random_state=938)
param_range = np.arange(3, 10)
oz = ValidationCurve(
DecisionTreeRegressor(random_state=23),
param_name="max_depth",
param_range=param_range,
cv=cv,
scoring="r2",
)
oz.fit(X, y)
oz.finalize()
self.assert_images_similar(oz, tol=12.0)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_quick_method(self):
"""
Test validation curve quick method with image closeness on SVC
"""
X, y = self.classification
pr = np.logspace(-6, -1, 3)
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=321)
viz = validation_curve(
SVC(), X, y, logx=True, param_name="gamma",
param_range=pr, cv=cv, show=False
)
self.assert_images_similar(viz)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration(self):
"""
Test on mushroom dataset with pandas DataFrame and Series and NB
"""
data = load_mushroom(return_dataset=True)
X, y = data.to_pandas()
X = pd.get_dummies(X)
assert isinstance(X, pd.DataFrame)
assert isinstance(y, pd.Series)
cv = StratifiedKFold(n_splits=2, shuffle=True, random_state=11)
pr = np.linspace(0.1, 3.0, 6)
oz = ValidationCurve(BernoulliNB(), cv=cv, param_range=pr, param_name="alpha")
oz.fit(X, y)
oz.finalize()
self.assert_images_similar(oz)
@pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows")
def test_numpy_integration(self):
"""
Test on mushroom dataset with NumPy arrays
"""
data = load_mushroom(return_dataset=True)
X, y = data.to_numpy()
X = OneHotEncoder().fit_transform(X).toarray()
cv = StratifiedKFold(n_splits=2, shuffle=True, random_state=11)
pr = np.linspace(0.1, 3.0, 6)
oz = ValidationCurve(BernoulliNB(), cv=cv, param_range=pr, param_name="alpha")
oz.fit(X, y)
oz.finalize()
self.assert_images_similar(oz)
@patch.object(ValidationCurve, "draw")
def test_reshape_scores(self, mock_draw):
"""
Test supplying an alternate CV methodology and train_sizes
"""
X, y = self.classification
pr = np.logspace(-6, -1, 3)
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=14)
oz = ValidationCurve(SVC(), param_name="gamma", param_range=pr, cv=cv)
oz.fit(X, y)
assert oz.train_scores_.shape == (3, 5)
assert oz.test_scores_.shape == (3, 5)
def test_bad_train_sizes(self):
"""
Test learning curve with bad input for training size.
"""
with pytest.raises(YellowbrickValueError):
ValidationCurve(SVC(), param_name="gamma", param_range=100)
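# Hedged usage sketch; not part of the original test module.  It drives the
# visualizer directly (outside pytest) with the same kind of SVC/gamma sweep
# exercised above; the synthetic data shapes are assumptions.
def _validation_curve_sketch():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    oz = ValidationCurve(SVC(), param_name="gamma",
                         param_range=np.logspace(-6, -1, 5), cv=3)
    oz.fit(X, y)
    oz.finalize()
    return oz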
| apache-2.0 |
ldirer/scikit-learn | sklearn/datasets/mldata.py | 17 | 7875 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home
from ..utils import Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
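# Hedged illustration of the normalization above; not part of the original
# module.  The dataset names reused here appear in the fetch_mldata()
# docstring examples.
def _mldata_filename_sketch():
    # 'Whistler Daily Snowfall' -> 'whistler-daily-snowfall'
    # 'datasets-UCI iris'       -> 'datasets-uci-iris'
    return (mldata_filename('Whistler Daily Snowfall'),
            mldata_filename('datasets-UCI iris'))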
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname :
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name : optional, default: 'label'
Name or index of the column containing the target values.
data_name : optional, default: 'data'
Name or index of the column containing the data.
transpose_data : optional, default: True
If True, transpose the downloaded data array.
data_home : optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the scikit-learn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to scikit-learn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by test runners to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
OpenSourcePolicyCenter/taxdata | puf_data/StatMatch/Matching/cpsmar.py | 1 | 31314 | """
Read in raw CPS data file and structure to be used in future scripts
Input file: asec2016_pubuse_v3.dat
Run time is approximately two hours
"""
from collections import OrderedDict
import pandas as pd
from tqdm import tqdm
def h_recs(rec):
"""
Process a household record from the raw CPS file.
Parameters
----------
rec: String containing a CPS household record
Returns
-------
DataFrame with the final record
"""
record = OrderedDict()
record['hrecord'] = [int(rec[0])]
record['h_seq'] = [int(rec[1:6])]
record['hhpos'] = [int(rec[6:8])]
record['hunits'] = [int(rec[8])]
record['hefaminc'] = [int(rec[9:11])]
record['h_respnm'] = [int(rec[11:13])]
record['h_year'] = [int(rec[13:17])]
record['h_hhtype'] = [int(rec[19])]
record['h_numper'] = [int(rec[20:22])]
record['hnumfam'] = [int(rec[22:24])]
record['h_type'] = [int(rec[24])]
record['h_month'] = [int(rec[25:27])]
record['h_mis'] = [int(rec[28])]
record['h_hhnum'] = [int(rec[29])]
record['h_livqrt'] = [int(rec[30:32])]
record['h_typebc'] = [int(rec[32:34])]
record['h_tenure'] = [int(rec[34])]
record['h_telhhd'] = [int(rec[35])]
record['h_telavl'] = [int(rec[36])]
record['h_telint'] = [int(rec[37])]
record['gereg'] = [int(rec[38])]
record['gestfips'] = [int(rec[41:43])]
record['gtcbsa'] = [int(rec[43:48])]
record['gtco'] = [int(rec[48:51])]
record['gtcbsast'] = [int(rec[51])]
record['gtmetsta'] = [int(rec[52])]
record['gtindvpc'] = [int(rec[53])]
record['gtcbsasz'] = [int(rec[54])]
record['gtcsa'] = [int(rec[55:58])]
record['hunder15'] = [int(rec[59:61])]
record['hh5to18'] = [int(rec[67:69])]
record['hhotlun'] = [int(rec[69])]
record['hhotno'] = [int(rec[70])]
record['hflunch'] = [int(rec[71])]
record['hflunno'] = [int(rec[72])]
record['hpublic'] = [int(rec[73])]
record['hlorent'] = [int(rec[74])]
record['hfoodsp'] = [int(rec[75])]
record['hfoodno'] = [int(rec[76])]
record['hfoodmo'] = [int(rec[78:80])]
record['hengast'] = [int(rec[84])]
record['hengval'] = [int(rec[85:89])]
record['hinc_ws'] = [int(rec[89])]
record['hwsval'] = [int(rec[90:97])]
record['hinc_se'] = [int(rec[97])]
record['hseval'] = [int(rec[98:105])]
record['hinc_fr'] = [int(rec[105])]
record['hfrval'] = [int(rec[106:113])]
record['hinc_uc'] = [int(rec[113])]
record['hucval'] = [int(rec[114:121])]
record['hinc_wc'] = [int(rec[121])]
record['hwcval'] = [int(rec[122:129])]
record['hss_yn'] = [int(rec[129])]
record['hssval'] = [int(rec[130:137])]
record['hssi_yn'] = [int(rec[137])]
record['hssival'] = [int(rec[138:144])]
record['hpaw_yn'] = [int(rec[144])]
record['hpawval'] = [int(rec[145:151])]
record['hvet_yn'] = [int(rec[151])]
record['hvetval'] = [int(rec[152:159])]
record['hsur_yn'] = [int(rec[159])]
record['hsurval'] = [int(rec[160:167])]
record['hdis_yn'] = [int(rec[167])]
record['hdisval'] = [int(rec[168:175])]
record['hret_yn'] = [int(rec[175])]
record['hretval'] = [int(rec[176:183])]
record['hint_yn'] = [int(rec[183])]
record['hintval'] = [int(rec[184:191])]
record['hdiv_yn'] = [int(rec[191])]
record['hdivval'] = [int(rec[192:199])]
record['hrnt_yn'] = [int(rec[199])]
record['hrntval'] = [int(rec[200:207])]
record['hed_yn'] = [int(rec[207])]
record['hedval'] = [int(rec[208:215])]
record['hcsp_yn'] = [int(rec[215])]
record['hcspval'] = [int(rec[216:223])]
record['hfin_yn'] = [int(rec[231])]
record['hfinval'] = [int(rec[232:239])]
record['hoi_yn'] = [int(rec[239])]
record['hoival'] = [int(rec[240:247])]
record['htotval'] = [int(rec[247:255])]
record['hearnval'] = [int(rec[255:263])]
record['hothval'] = [int(rec[263:271])]
record['hhinc'] = [int(rec[271:273])]
record['hmcare'] = [int(rec[273])]
record['hmcaid'] = [int(rec[274])]
record['hchamp'] = [int(rec[275])]
record['hhi_yn'] = [int(rec[276])]
record['hhstatus'] = [int(rec[277])]
record['hunder18'] = [int(rec[278:280])]
record['htop5pct'] = [int(rec[280])]
record['hpctcut'] = [int(rec[281:283])]
record['hsup_wgt'] = [float(rec[286:292] + '.' + rec[292:294])]
record['h1tenure'] = [int(rec[294])]
record['h1livqrt'] = [int(rec[296])]
record['h1telhhd'] = [int(rec[298])]
record['h1telavl'] = [int(rec[299])]
record['h1telint'] = [int(rec[300])]
record['i_hhotlu'] = [int(rec[307])]
record['i_hhotno'] = [int(rec[308])]
record['i_hflunc'] = [int(rec[309])]
record['i_hflunn'] = [int(rec[310])]
record['i_hpubli'] = [int(rec[311])]
record['i_hloren'] = [int(rec[312])]
record['i_hfoods'] = [int(rec[313])]
record['i_hfdval'] = [int(rec[314])]
record['i_hfoodn'] = [int(rec[315])]
record['i_hfoodm'] = [int(rec[316])]
record['i_hengas'] = [int(rec[317])]
record['i_hengva'] = [int(rec[318])]
record['h_idnum2'] = [str(rec[319:324])]
record['prop_tax'] = [int(rec[331:336])]
record['housret'] = [int(rec[336:341])]
record['hrhtype'] = [int(rec[341:343])]
record['h_idnum1'] = [str(rec[343:358])]
record['i_hunits'] = [int(rec[358])]
record['hrpaidcc'] = [int(rec[366])]
record['hprop_val'] = [int(rec[367:375])]
record['thprop_val'] = [int(rec[375])]
record['i_propval'] = [int(rec[376])]
record['hrnumwic'] = [int(rec[382:384])]
record['hrwicyn'] = [int(rec[385])]
record['hfdval'] = [int(rec[386:391])]
record['tcare_val'] = [int(rec[391])]
record['care_val'] = [int(rec[392:398])]
record['i_careval'] = [int(rec[398])]
record['hpres_mort'] = [int(rec[399])]
return pd.DataFrame(record)
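# Hedged sketch of how the three fixed-width parsers in this module could be
# driven over the raw file; it is NOT the script's actual driver, which is
# outside this excerpt.  It assumes the usual CPS ASEC layout in which the
# first character of each line gives the record type (1 = household,
# 2 = family, 3 = person), consistent with the hrecord/frecord/precord
# fields read by these functions.
def _parse_cps_sketch(path='asec2016_pubuse_v3.dat'):
    households, families, persons = [], [], []
    with open(path) as raw:
        for line in raw:
            rectype = line[0]
            if rectype == '1':
                households.append(h_recs(line))
            elif rectype == '2':
                families.append(f_recs(line))
            elif rectype == '3':
                persons.append(p_recs(line))
    return (pd.concat(households), pd.concat(families), pd.concat(persons))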
def f_recs(rec):
"""
Process a family record from the raw CPS file.
Parameters
----------
rec: String containing a CPS family record
Returns
-------
DataFrame with the final record
"""
record = OrderedDict()
record['frecord'] = [int(rec[0])]
record['fh_seq'] = [int(rec[1:6])]
record['ffpos'] = [int(rec[6:8])]
record['fkind'] = [int(rec[8])]
record['ftype'] = [int(rec[9])]
record['fpersons'] = [int(rec[10:12])]
record['fheadidx'] = [int(rec[12:14])]
record['fwifeidx'] = [int(rec[14:16])]
record['fhusbidx'] = [int(rec[16:18])]
record['fspouidx'] = [int(rec[18:20])]
record['flastidx'] = [int(rec[20:22])]
record['fmlasidx'] = [int(rec[22:24])]
record['fownu6'] = [int(rec[24])]
record['fownu18'] = [int(rec[26])]
record['frelu6'] = [int(rec[27])]
record['frelu18'] = [int(rec[28])]
record['fpctcut'] = [int(rec[29:31])]
record['fpovcut'] = [int(rec[31:36])]
record['famlis'] = [int(rec[36])]
record['povll'] = [int(rec[37:39])]
record['frspov'] = [int(rec[39:41])]
record['frsppct'] = [int(rec[41:46])]
record['finc_ws'] = [int(rec[46])]
record['fwsval'] = [int(rec[47:54])]
record['finc_se'] = [int(rec[54])]
record['fseval'] = [int(rec[55:62])]
record['finc_fr'] = [int(rec[62])]
record['ffrval'] = [int(rec[63:70])]
record['finc_uc'] = [int(rec[70])]
record['fucval'] = [int(rec[71:78])]
record['finc_wc'] = [int(rec[78])]
record['fwcval'] = [int(rec[79:86])]
record['finc_ss'] = [int(rec[86])]
record['fssval'] = [int(rec[87:94])]
record['finc_ssi'] = [int(rec[94])]
record['fssival'] = [int(rec[95:101])]
record['finc_paw'] = [int(rec[101])]
record['fpawval'] = [int(rec[102:108])]
record['finc_vet'] = [int(rec[108])]
record['fvetval'] = [int(rec[109:116])]
record['finc_sur'] = [int(rec[116])]
record['fsurval'] = [int(rec[117:124])]
record['finc_dis'] = [int(rec[124])]
record['fdisval'] = [int(rec[125:132])]
record['finc_ret'] = [int(rec[132])]
record['fretval'] = [int(rec[133:140])]
record['finc_int'] = [int(rec[140])]
record['fintval'] = [int(rec[141:148])]
record['finc_div'] = [int(rec[148])]
record['fdivval'] = [int(rec[149:156])]
record['finc_rnt'] = [int(rec[156])]
record['frntval'] = [int(rec[157:164])]
record['finc_ed'] = [int(rec[164])]
record['fedval'] = [int(rec[165:172])]
record['finc_csp'] = [int(rec[172])]
record['fcspval'] = [int(rec[173:180])]
record['finc_fin'] = [int(rec[188])]
record['ffinval'] = [int(rec[189:196])]
record['finc_oi'] = [int(rec[196])]
record['foival'] = [int(rec[197:204])]
record['ftotval'] = [int(rec[204:212])]
record['fearnval'] = [int(rec[212:220])]
record['fothval'] = [int(rec[220:228])]
record['ftot_r'] = [int(rec[228:230])]
record['fspanish'] = [int(rec[230])]
record['fsup_wgt'] = [float(rec[232:238] + '.' + rec[238:240])]
record['ffposold'] = [int(rec[240:242])]
record['f_mv_fs'] = [int(rec[242:247])]
record['f_mv_sl'] = [int(rec[247:251])]
record['fhoussub'] = [int(rec[260:263])]
record['fhip_val'] = [int(rec[271:278])]
record['fmoop'] = [int(rec[278:285])]
record['fotc_val'] = [int(rec[285:291])]
record['fmed_val'] = [int(rec[291:298])]
record['i_fhipval'] = [int(rec[298])]
return pd.DataFrame(record)
def p_recs(rec):
"""
Process a person record from the raw CPS file.
Parameters
----------
rec: String containing a CPS person record
Returns
-------
DataFrame with the final record
"""
record = OrderedDict()
record['precord'] = [int(rec[0])]
record['ph_seq'] = [int(rec[1:6])]
record['pppos'] = [int(rec[6:8])]
record['ppposold'] = [int(rec[8:10])]
record['a_lineno'] = [int(rec[10:12])]
record['a_parent'] = [int(rec[12:14])]
record['a_exprrp'] = [int(rec[14:16])]
record['perrp'] = [int(rec[16:18])]
record['a_age'] = [int(rec[18:20])]
record['a_maritl'] = [int(rec[20])]
record['a_spouse'] = [int(rec[21:23])]
record['a_sex'] = [int(rec[23])]
record['a_hga'] = [int(rec[24:26])]
record['prdtrace'] = [int(rec[26:28])]
record['p_stat'] = [int(rec[28])]
record['prpertyp'] = [int(rec[29])]
record['pehspnon'] = [int(rec[30])]
record['prdthsp'] = [int(rec[31])]
record['a_famnum'] = [int(rec[32:34])]
record['a_famtyp'] = [int(rec[34])]
record['a_famrel'] = [int(rec[35])]
record['a_pfrel'] = [int(rec[36])]
record['hhdrel'] = [int(rec[37])]
record['famrel'] = [int(rec[38:40])]
record['hhdfmx'] = [int(rec[40:42])]
record['parent'] = [int(rec[42])]
record['age1'] = [int(rec[43:45])]
record['phf_seq'] = [int(rec[45:47])]
record['pf_seq'] = [int(rec[47:49])]
record['pecohab'] = [int(rec[49:51])]
record['pelnmom'] = [int(rec[51:53])]
record['pelndad'] = [int(rec[53:55])]
record['pemomtyp'] = [int(rec[55:57])]
record['pedadtyp'] = [int(rec[57:59])]
record['peafever'] = [int(rec[59:61])]
record['peafwhn1'] = [int(rec[61:63])]
record['peafwhn2'] = [int(rec[63:65])]
record['peafwhn3'] = [int(rec[65:67])]
record['peafwhn4'] = [int(rec[67:69])]
record['pedisear'] = [int(rec[69:71])]
record['pediseye'] = [int(rec[71:73])]
record['pedisrem'] = [int(rec[73:75])]
record['pedisphy'] = [int(rec[75:77])]
record['pedisdrs'] = [int(rec[77:79])]
record['pedisout'] = [int(rec[79:81])]
record['prdisflg'] = [int(rec[81:83])]
record['penatvty'] = [int(rec[83:86])]
record['pemntvty'] = [int(rec[86:89])]
record['pefntvty'] = [int(rec[89:92])]
record['peinusyr'] = [int(rec[92:94])]
record['prcitshp'] = [int(rec[94])]
record['peridnum'] = [str(rec[95:117])]
record['fl_665'] = [int(rec[117])]
record['prdasian'] = [int(rec[118:120])]
record['a_fnlwgt'] = [float(rec[138:144] + '.' + rec[144:146])]
record['a_ernlwt'] = [float(rec[146:152] + '.' + rec[152:154])]
record['marsupwt'] = [float(rec[154:160] + '.' + rec[160:162])]
record['a_hrs1'] = [int(rec[162:164])]
record['a_uslft'] = [int(rec[164])]
record['a_whyabs'] = [int(rec[165])]
record['a_payabs'] = [int(rec[166])]
record['peioind'] = [int(rec[167:171])]
record['peioocc'] = [int(rec[171:175])]
record['a_clswkr'] = [int(rec[175])]
record['a_wkslk'] = [int(rec[176:179])]
record['a_whenlj'] = [int(rec[179])]
record['a_nlflj'] = [int(rec[180])]
record['a_wantjb'] = [int(rec[181])]
record['prerelg'] = [int(rec[182])]
record['a_uslhrs'] = [int(rec[183:185])]
record['a_hrlywk'] = [int(rec[185])]
record['a_hrspay'] = [float(rec[186:188] + '.' + rec[188:190])]
record['a_grswk'] = [int(rec[190:194])]
record['a_unmem'] = [int(rec[194])]
record['a_uncov'] = [int(rec[195])]
record['a_enrlw'] = [int(rec[196])]
record['a_hscol'] = [int(rec[197])]
record['a_ftpt'] = [int(rec[198])]
record['a_lfsr'] = [int(rec[199])]
record['a_untype'] = [int(rec[200])]
record['a_wkstat'] = [int(rec[201])]
record['a_explf'] = [int(rec[202])]
record['a_wksch'] = [int(rec[203])]
record['a_civlf'] = [int(rec[204])]
record['a_ftlf'] = [int(rec[205])]
record['a_mjind'] = [int(rec[206:208])]
record['a_dtind'] = [int(rec[208:210])]
record['a_mjocc'] = [int(rec[210:212])]
record['a_dtocc'] = [int(rec[212:214])]
record['peio1cow'] = [int(rec[214:216])]
record['prcow1'] = [int(rec[216])]
record['pemlr'] = [int(rec[217])]
record['pruntype'] = [int(rec[218])]
record['prwkstat'] = [int(rec[219:221])]
record['prptrea'] = [int(rec[221:223])]
record['prdisc'] = [int(rec[223])]
record['peabsrsn'] = [int(rec[224:226])]
record['prnlfsch'] = [int(rec[226])]
record['pehruslt'] = [int(rec[227:230])]
record['workyn'] = [int(rec[250])]
record['wrk_ck'] = [int(rec[251])]
record['wtemp'] = [int(rec[252])]
record['nwlook'] = [int(rec[253])]
record['nwlkwk'] = [int(rec[254:256])]
record['rsnnotw'] = [int(rec[256])]
record['wkswork'] = [int(rec[257:259])]
record['wkcheck'] = [int(rec[259])]
record['losewks'] = [int(rec[260])]
record['lknone'] = [int(rec[261])]
record['lkweeks'] = [int(rec[262:264])]
record['lkstrch'] = [int(rec[264])]
record['pyrsn'] = [int(rec[265])]
record['phmemprs'] = [int(rec[266])]
record['hrswk'] = [int(rec[267:269])]
record['hrcheck'] = [int(rec[269])]
record['ptyn'] = [int(rec[270])]
record['ptweeks'] = [int(rec[271:273])]
record['ptrsn'] = [int(rec[273])]
record['wexp'] = [int(rec[274:276])]
record['wewkrs'] = [int(rec[276])]
record['welknw'] = [int(rec[277])]
record['weuemp'] = [int(rec[278])]
record['earner'] = [int(rec[279])]
record['clwk'] = [int(rec[280])]
record['weclw'] = [int(rec[281])]
record['poccu2'] = [int(rec[282:284])]
record['wemocg'] = [int(rec[284:286])]
record['weind'] = [int(rec[286:288])]
record['wemind'] = [int(rec[288:290])]
record['ljcw'] = [int(rec[290])]
record['industry'] = [int(rec[291:295])]
record['occup'] = [int(rec[295:299])]
record['noemp'] = [int(rec[299])]
record['nxtres'] = [int(rec[320:322])]
record['mig_cbst'] = [int(rec[322])]
record['migsame'] = [int(rec[323])]
record['mig_reg'] = [int(rec[324])]
record['mig_st'] = [int(rec[325:327])]
record['mig_dscp'] = [int(rec[327])]
record['gediv'] = [int(rec[328])]
record['mig_div'] = [int(rec[329:331])]
record['mig_mtr1'] = [int(rec[331:333])]
record['mig_mtr3'] = [int(rec[333])]
record['mig_mtr4'] = [int(rec[334])]
record['ern_yn'] = [int(rec[351])]
record['ern_srce'] = [int(rec[352])]
record['ern_otr'] = [int(rec[353])]
record['ern_val'] = [int(rec[354:361])]
record['wageotr'] = [int(rec[361])]
record['wsal_yn'] = [int(rec[362])]
record['wsal_val'] = [int(rec[363:370])]
record['ws_val'] = [int(rec[370:377])]
record['seotr'] = [int(rec[377])]
record['semp_yn'] = [int(rec[378])]
record['semp_val'] = [int(rec[379:386])]
record['se_val'] = [int(rec[386:392])]
record['frmotr'] = [int(rec[392])]
record['frse_yn'] = [int(rec[393])]
record['frse_val'] = [int(rec[394:401])]
record['frm_val'] = [int(rec[401:407])]
record['uc_yn'] = [int(rec[407])]
record['subuc'] = [int(rec[408])]
record['strkuc'] = [int(rec[409])]
record['uc_val'] = [int(rec[410:415])]
record['wc_yn'] = [int(rec[415])]
record['wc_type'] = [int(rec[416])]
record['wc_val'] = [int(rec[417:422])]
record['ss_yn'] = [int(rec[422])]
record['ss_val'] = [int(rec[423:428])]
record['resnss1'] = [int(rec[428])]
record['resnss2'] = [int(rec[429])]
record['sskidyn'] = [int(rec[430])]
record['ssi_yn'] = [int(rec[431])]
record['ssi_val'] = [int(rec[432:437])]
record['resnssi1'] = [int(rec[437])]
record['resnssi2'] = [int(rec[438])]
record['ssikidyn'] = [int(rec[439])]
record['paw_yn'] = [int(rec[440])]
record['paw_typ'] = [int(rec[441])]
record['paw_mon'] = [int(rec[442:444])]
record['paw_val'] = [int(rec[444:449])]
record['vet_yn'] = [int(rec[449])]
record['vet_typ1'] = [int(rec[450])]
record['vet_typ2'] = [int(rec[451])]
record['vet_typ3'] = [int(rec[452])]
record['vet_typ4'] = [int(rec[453])]
record['vet_typ5'] = [int(rec[454])]
record['vet_qva'] = [int(rec[455])]
record['vet_val'] = [int(rec[456:461])]
record['sur_yn'] = [int(rec[461])]
record['sur_sc1'] = [int(rec[462:464])]
record['sur_sc2'] = [int(rec[464:466])]
record['sur_val1'] = [int(rec[466:471])]
record['sur_val2'] = [int(rec[471:476])]
record['srvs_val'] = [int(rec[476:482])]
record['dis_hp'] = [int(rec[482])]
record['dis_cs'] = [int(rec[483])]
record['dis_yn'] = [int(rec[484])]
record['dis_sc1'] = [int(rec[485:487])]
record['dis_sc2'] = [int(rec[487:489])]
record['dis_val1'] = [int(rec[489:494])]
record['dis_val2'] = [int(rec[494:499])]
record['dsab_val'] = [int(rec[499:505])]
record['ret_yn'] = [int(rec[505])]
record['ret_sc1'] = [int(rec[506])]
record['ret_sc2'] = [int(rec[507])]
record['ret_val1'] = [int(rec[508:513])]
record['ret_val2'] = [int(rec[513:518])]
record['rtm_val'] = [int(rec[518:524])]
record['int_yn'] = [int(rec[524])]
record['int_val'] = [int(rec[525:530])]
record['div_yn'] = [int(rec[530])]
record['div_val'] = [int(rec[532:538])]
record['rnt_yn'] = [int(rec[538])]
record['rnt_val'] = [int(rec[539:544])]
record['ed_yn'] = [int(rec[544])]
record['oed_typ1'] = [int(rec[545])]
record['oed_typ2'] = [int(rec[546])]
record['oed_typ3'] = [int(rec[547])]
record['ed_val'] = [int(rec[548:553])]
record['csp_yn'] = [int(rec[553])]
record['csp_val'] = [int(rec[554:559])]
record['fin_yn'] = [int(rec[565])]
record['fin_val'] = [int(rec[566:571])]
record['oi_off'] = [int(rec[571:573])]
record['oi_yn'] = [int(rec[573])]
record['oi_val'] = [int(rec[574:579])]
record['ptotval'] = [int(rec[579:587])]
record['pearnval'] = [int(rec[587:595])]
record['pothval'] = [int(rec[595:603])]
record['ptot_r'] = [int(rec[603:605])]
record['perlis'] = [int(rec[605])]
record['pov_univ'] = [int(rec[606])]
record['wicyn'] = [int(rec[607])]
record['mcare'] = [int(rec[628])]
record['mcaid'] = [int(rec[634])]
record['champ'] = [int(rec[640])]
record['hi_yn'] = [int(rec[641])]
record['hiown'] = [int(rec[642])]
record['hiemp'] = [int(rec[643])]
record['hipaid'] = [int(rec[644])]
record['emcontrb'] = [int(rec[645:649])]
record['hi'] = [int(rec[649])]
record['hityp'] = [int(rec[650])]
record['dephi'] = [int(rec[651])]
record['hilin1'] = [int(rec[652:654])]
record['hilin2'] = [int(rec[654:656])]
record['paid'] = [int(rec[656])]
record['hiout'] = [int(rec[657])]
record['priv'] = [int(rec[658])]
record['prityp'] = [int(rec[659])]
record['depriv'] = [int(rec[660])]
record['pilin1'] = [int(rec[661:663])]
record['pilin2'] = [int(rec[663:665])]
record['pout'] = [int(rec[665])]
record['out'] = [int(rec[666])]
record['care'] = [int(rec[667])]
record['caid'] = [int(rec[668])]
record['mon'] = [int(rec[669:671])]
record['oth'] = [int(rec[671])]
record['otyp_1'] = [int(rec[672])]
record['otyp_2'] = [int(rec[673])]
record['otyp_3'] = [int(rec[674])]
record['otyp_4'] = [int(rec[675])]
record['otyp_5'] = [int(rec[676])]
record['othstper'] = [int(rec[677])]
record['othstyp1'] = [int(rec[678:680])]
record['othstyp2'] = [int(rec[680:682])]
record['othstyp3'] = [int(rec[682:684])]
record['othstyp4'] = [int(rec[684:686])]
record['othstyp5'] = [int(rec[686:688])]
record['othstyp6'] = [int(rec[688:690])]
record['hea'] = [int(rec[690])]
record['ihsflg'] = [int(rec[691])]
record['ahiper'] = [int(rec[692])]
record['ahityp1'] = [int(rec[693:695])]
record['ahityp2'] = [int(rec[695:697])]
record['ahityp3'] = [int(rec[697:699])]
record['ahityp4'] = [int(rec[699:701])]
record['ahityp5'] = [int(rec[701:703])]
record['ahityp6'] = [int(rec[703:705])]
record['pchip'] = [int(rec[705])]
record['cov_gh'] = [int(rec[706])]
record['cov_hi'] = [int(rec[707])]
record['ch_mc'] = [int(rec[708])]
record['ch_hi'] = [int(rec[709])]
record['marg_tax'] = [int(rec[723:725])]
record['ctc_crd'] = [int(rec[725:730])]
record['penplan'] = [int(rec[730])]
record['penincl'] = [int(rec[731])]
record['filestat'] = [int(rec[732])]
record['dep_stat'] = [int(rec[733:735])]
record['eit_cred'] = [int(rec[735:739])]
record['actc_crd'] = [int(rec[739:743])]
record['fica'] = [int(rec[743:748])]
record['fed_ret'] = [int(rec[748:754])]
record['agi'] = [int(rec[754:761])]
record['tax_inc'] = [int(rec[762:769])]
record['fedtax_bc'] = [int(rec[769:776])]
record['fedtax_ac'] = [int(rec[776:783])]
record['statetax_bc'] = [int(rec[783:789])]
record['statetax_ac'] = [int(rec[789:795])]
record['prswkxpns'] = [int(rec[795:799])]
record['paidccyn'] = [int(rec[799])]
record['paidcyna'] = [int(rec[800])]
record['moop'] = [int(rec[801:808])]
record['phip_val'] = [int(rec[808:814])]
record['potc_val'] = [int(rec[814:819])]
record['pmed_val'] = [int(rec[819:825])]
record['chsp_val'] = [int(rec[825:830])]
record['chsp_yn'] = [int(rec[830])]
record['chelsew_yn'] = [int(rec[831])]
record['axrrp'] = [int(rec[852])]
record['axage'] = [int(rec[853])]
record['axmaritl'] = [int(rec[854])]
record['axspouse'] = [int(rec[855])]
record['axsex'] = [int(rec[856])]
record['axhga'] = [int(rec[857])]
record['pxrace1'] = [int(rec[858:860])]
record['pxhspnon'] = [int(rec[860:862])]
record['pxcohab'] = [int(rec[862:864])]
record['pxlnmom'] = [int(rec[864:866])]
record['pxlndad'] = [int(rec[866:868])]
record['pxmomtyp'] = [int(rec[868:870])]
record['pxdadtyp'] = [int(rec[870:872])]
record['pxafever'] = [int(rec[872:874])]
record['pxafwhn1'] = [int(rec[874:876])]
record['pxdisear'] = [int(rec[876:878])]
record['pxdiseye'] = [int(rec[878:880])]
record['pxdisrem'] = [int(rec[880:882])]
record['pxdisphy'] = [int(rec[882:884])]
record['pxdisdrs'] = [int(rec[884:886])]
record['pxdisout'] = [int(rec[886:888])]
record['pxnatvty'] = [int(rec[888:890])]
record['pxmntvty'] = [int(rec[890:892])]
record['pxfntvty'] = [int(rec[892:894])]
record['pxinusyr'] = [int(rec[894:896])]
record['prwernal'] = [int(rec[896])]
record['prhernal'] = [int(rec[897])]
record['axhrs'] = [int(rec[898])]
record['axwhyabs'] = [int(rec[899])]
record['axpayabs'] = [int(rec[900])]
record['axclswkr'] = [int(rec[901])]
record['axnlflj'] = [int(rec[902])]
record['axuslhrs'] = [int(rec[903])]
record['axhrlywk'] = [int(rec[904])]
record['axunmem'] = [int(rec[905])]
record['axuncov'] = [int(rec[906])]
record['axenrlw'] = [int(rec[907])]
record['axhscol'] = [int(rec[908])]
record['axftpt'] = [int(rec[909])]
record['axlfsr'] = [int(rec[910])]
record['i_workyn'] = [int(rec[911])]
record['i_wtemp'] = [int(rec[912])]
record['i_nwlook'] = [int(rec[913])]
record['i_nwlkwk'] = [int(rec[914])]
record['i_rsnnot'] = [int(rec[915])]
record['i_wkswk'] = [int(rec[916])]
record['i_wkchk'] = [int(rec[917])]
record['i_losewk'] = [int(rec[918])]
record['i_lkweek'] = [int(rec[919])]
record['i_lkstr'] = [int(rec[920])]
record['i_pyrsn'] = [int(rec[921])]
record['i_phmemp'] = [int(rec[922])]
record['i_hrswk'] = [int(rec[923])]
record['i_hrchk'] = [int(rec[924])]
record['i_ptyn'] = [int(rec[925])]
record['i_ptwks'] = [int(rec[926])]
record['i_ptrsn'] = [int(rec[927])]
record['i_ljcw'] = [int(rec[928])]
record['i_indus'] = [int(rec[929])]
record['i_occup'] = [int(rec[930])]
record['i_noemp'] = [int(rec[931])]
record['i_nxtres'] = [int(rec[932])]
record['i_mig1'] = [int(rec[933])]
record['i_mig2'] = [int(rec[934:936])]
record['i_mig3'] = [int(rec[936])]
record['i_disyn'] = [int(rec[937])]
record['i_ernyn'] = [int(rec[938])]
record['i_ernsrc'] = [int(rec[939])]
record['i_ernval'] = [int(rec[940])]
record['i_retsc2'] = [int(rec[941])]
record['i_wsyn'] = [int(rec[942])]
record['i_wsval'] = [int(rec[943])]
record['i_seyn'] = [int(rec[944])]
record['i_seval'] = [int(rec[945])]
record['i_frmyn'] = [int(rec[946])]
record['i_frmval'] = [int(rec[947])]
record['i_ucyn'] = [int(rec[948])]
record['i_ucval'] = [int(rec[949])]
record['i_wcyn'] = [int(rec[950])]
record['i_wctyp'] = [int(rec[951])]
record['i_wcval'] = [int(rec[952])]
record['i_ssyn'] = [int(rec[953])]
record['i_ssval'] = [int(rec[954])]
record['resnssa'] = [int(rec[955])]
record['i_ssiyn'] = [int(rec[956])]
record['sskidyna'] = [int(rec[957])]
record['i_ssival'] = [int(rec[958])]
record['resnssia'] = [int(rec[959])]
record['i_pawyn'] = [int(rec[960])]
record['ssikdyna'] = [int(rec[961])]
record['i_pawtyp'] = [int(rec[962])]
record['i_pawmo'] = [int(rec[963])]
record['i_pawval'] = [int(rec[964])]
record['i_vetyn'] = [int(rec[965])]
record['i_vettyp'] = [int(rec[966])]
record['i_vetqva'] = [int(rec[967])]
record['i_vetval'] = [int(rec[968])]
record['i_suryn'] = [int(rec[969])]
record['i_sursc1'] = [int(rec[970])]
record['i_sursc2'] = [int(rec[971])]
record['i_survl1'] = [int(rec[972])]
record['i_survl2'] = [int(rec[973])]
record['i_dishp'] = [int(rec[974])]
record['i_discs'] = [int(rec[975])]
record['i_dissc1'] = [int(rec[976])]
record['i_dissc2'] = [int(rec[977])]
record['i_disvl1'] = [int(rec[978])]
record['i_disvl2'] = [int(rec[979])]
record['i_retyn'] = [int(rec[980])]
record['i_retsc1'] = [int(rec[981])]
record['i_retvl1'] = [int(rec[982])]
record['i_retvl2'] = [int(rec[983])]
record['i_intyn'] = [int(rec[984])]
record['i_intval'] = [int(rec[985])]
record['i_divyn'] = [int(rec[986])]
record['i_divval'] = [int(rec[987])]
record['i_rntyn'] = [int(rec[988])]
record['i_rntval'] = [int(rec[989])]
record['i_edyn'] = [int(rec[990])]
record['i_edtyp1'] = [int(rec[991])]
record['i_edtyp2'] = [int(rec[992])]
record['i_oedval'] = [int(rec[993])]
record['i_cspyn'] = [int(rec[994])]
record['i_cspval'] = [int(rec[995])]
record['i_finyn'] = [int(rec[998])]
record['i_finval'] = [int(rec[999])]
record['i_oival'] = [int(rec[1000])]
record['wicyna'] = [int(rec[1001])]
record['i_hi'] = [int(rec[1002])]
record['i_dephi'] = [int(rec[1003])]
record['i_paid'] = [int(rec[1004])]
record['i_hiout'] = [int(rec[1005])]
record['i_priv'] = [int(rec[1006])]
record['i_depriv'] = [int(rec[1007])]
record['i_pout'] = [int(rec[1008])]
record['i_out'] = [int(rec[1009])]
record['i_care'] = [int(rec[1010])]
record['i_caid'] = [int(rec[1011])]
record['i_mon'] = [int(rec[1012])]
record['i_oth'] = [int(rec[1013])]
record['i_otyp'] = [int(rec[1014])]
record['i_ostper'] = [int(rec[1015])]
record['i_ostyp'] = [int(rec[1016])]
record['i_hea'] = [int(rec[1017])]
record['iahiper'] = [int(rec[1018])]
record['iahityp'] = [int(rec[1019])]
record['i_pchip'] = [int(rec[1020])]
record['i_penpla'] = [int(rec[1021])]
record['i_peninc'] = [int(rec[1022])]
record['i_phipval'] = [int(rec[1023])]
record['i_potcval'] = [int(rec[1024])]
record['i_pmedval'] = [int(rec[1025])]
record['i_chspval'] = [int(rec[1026])]
record['i_chspyn'] = [int(rec[1027])]
record['i_chelsewyn'] = [int(rec[1028])]
record['a_werntf'] = [int(rec[1049])]
record['a_herntf'] = [int(rec[1050])]
record['tcernval'] = [int(rec[1051])]
record['tcwsval'] = [int(rec[1052])]
record['tcseval'] = [int(rec[1053])]
record['tcffmval'] = [int(rec[1054])]
record['tsurval1'] = [int(rec[1055])]
record['tsurval2'] = [int(rec[1056])]
record['tdisval1'] = [int(rec[1057])]
record['tdisval2'] = [int(rec[1058])]
record['tretval1'] = [int(rec[1059])]
record['tretval2'] = [int(rec[1060])]
record['tint_val'] = [int(rec[1061])]
record['tdiv_val'] = [int(rec[1062])]
record['trnt_val'] = [int(rec[1063])]
record['ted_val'] = [int(rec[1064])]
record['tcsp_val'] = [int(rec[1065])]
record['tfin_val'] = [int(rec[1067])]
record['toi_val'] = [int(rec[1068])]
record['tphip_val'] = [int(rec[1069])]
record['tpotc_val'] = [int(rec[1070])]
record['tpmed_val'] = [int(rec[1071])]
record['tchsp_val'] = [int(rec[1072])]
return pd.DataFrame(record)
def create_cps(raw_cps):
"""
Function to start process of creating the CPS file
Parameters
----------
raw_cps: String containing path to CPS file in DAT format as downloaded
from the NBER website
Returns
-------
CPS file as a pandas DF
"""
# Read in CPS file
cps = [line.strip().split() for line in
open(raw_cps).readlines()]
# Empty list to hold the completed records
cps_list = []
print('Creating Records')
for record in tqdm(cps):
# Find the type of record
rectype = record[0][0]
if rectype == '1':
# If it's a household, hold that record to concat to family records
house_rec = h_recs(record[0])
elif rectype == '2':
# If it's a family record, concat to household record and store
house_fam = pd.concat([house_rec, f_recs(record[0])], axis=1,
sort=False)
else:
# If it's a person record, concat to household and family record
final_rec = pd.concat([house_fam, p_recs(record[0])], axis=1,
sort=False)
# Append final record to the list of records
cps_list.append(final_rec)
# Create the data set by combining all of the records
cps_mar = pd.concat(cps_list, sort=False)
# Export the data
print('Exporting Data')
cps_mar.to_csv('cpsmar2016.csv', index=False)
return cps_mar
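# Minimal usage sketch (not part of the original script). The .dat file name
# below is a hypothetical placeholder for the fixed-width ASEC file downloaded
# from the NBER website.
if __name__ == '__main__':
    mar_df = create_cps('asec2016_pubuse.dat')
    print(mar_df.shape)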
| mit |
sit23/Isca | src/extra/python/isca/land_generator_fn.py | 4 | 11099 | # Function to allow land to be generated for a range of scenarios
# Land Options:
# 'square' (default) Square block of land with boundaries specified by boundaries keyword, a list of 4 floats in the form [S,N,W,E]
# 'continents_old' Choose continents from the original continent set-up adapted from the Sauliere 2012 paper (Jan 16), including North and South America, Eurasia, and Africa.
# 'continents' Choose continents from a newer continent set-up allowing addition of India, Australia, and South East Asia.
# If continents keyword is set to 'all' (default), then this will include all possibilities for the given set-up
# Alternatively, continents can be set to a list of strings containing options:
# NA - North America
# SA - South America
# EA - Eurasia
# AF - Africa
# OZ - Australia
# IN - India
# SEA - South East Asia
# Topography Options:
#'none' (default) Topography set to zero everywhere
#'sauliere2012' Choose mountains from Sauliere 2012 configuration using mountains keyword. Default is 'all', alternatively only 'rockys' or 'tibet' may be specified
#'gaussian' Use parameters specified in topo_gauss keyword to set up a Gaussian mountain. topo_gauss should be a list in the form: [central_lat,central_lon,radius_degrees,std_dev,height]
# Topography boundary options:
# If waterworld keyword is set to False (default), then topography can only be non-zero on continents - important as topography has a Gaussian structure and tends exponentially to zero.
# If waterworld keyword is set to True, aquamountains are possible - extra work needed here to deal with exponential issues!
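# Illustrative calls (a sketch, not from the original file; 'my_exp' is a hypothetical experiment directory under GFDL_BASE/exp):
# write_land('my_exp', land_mode='continents', continents=['NA', 'EA'], topo_mode='sauliere2012', mountains=['rockys'])
# write_land('my_exp', land_mode='square', boundaries=[20., 60., 20., 60.], topo_mode='gaussian', topo_gauss=[40., 40., 20., 10., 3500.])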
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import os
def write_land(exp,land_mode='square',boundaries=[20.,60.,20.,60.],continents=['all'],topo_mode='none',mountains=['all'],topo_gauss=[40.,40.,20.,10.,3500.],waterworld=False):
# Common features of set-ups
# specify resolution
t_res = 42
    # read in grid from the appropriate file
GFDL_BASE = os.environ['GFDL_BASE']
resolution_file = Dataset(GFDL_BASE + 'src/extra/python/scripts/gfdl_grid_files/t'+str(t_res)+'.nc', 'r', format='NETCDF3_CLASSIC')
lons = resolution_file.variables['lon'][:]
lats = resolution_file.variables['lat'][:]
lonb = resolution_file.variables['lonb'][:]
latb = resolution_file.variables['latb'][:]
nlon=lons.shape[0]
nlat=lats.shape[0]
topo_array = np.zeros((nlat,nlon))
land_array = np.zeros((nlat,nlon))
#make 2d arrays of latitude and longitude
lon_array, lat_array = np.meshgrid(lons,lats)
lonb_array, latb_array = np.meshgrid(lonb,latb)
#create dictionary for continents
cont_dic = {'NA':0, 'SA':1, 'EA':2, 'AF':3, 'OZ':4, 'IN':5, 'SEA':6}
# Firstly determine the land set-up to be used
# 1) Set-up in which a square of land is included
if land_mode=='square':
idx = (boundaries[0] <= lat_array) & (lat_array < boundaries[1]) & (boundaries[2] < lon_array) & (boundaries[3] > lon_array)
land_array[idx] = 1.0
# 2) Set-up in which some or all of 'original' continents are included
elif land_mode=='continents_old': #Older configuration of continents: Addition of India and SE Asia required some restructuring. This may be removed once obsolete.
idx_c = np.zeros((4,nlat,nlon), dtype=bool)
idx_c[0,:,:] = (103.-43./40.*(lon_array-180) < lat_array) & ((lon_array-180)*43./50. -51.8 < lat_array) &( lat_array < 60.) #North America
idx_c[1,:,:] = (737.-7.2*(lon_array-180) < lat_array) & ((lon_array-180)*10./7. + -212.1 < lat_array) &( lat_array < -22./45*(lon_array-180) +65.9) #South America
eurasia_pos = (17. <= lat_array) & (lat_array < 60.) & (-5. < lon_array) & ( 43./40.*lon_array -101.25 < lat_array)
eurasia_neg = (17. <= lat_array) & (lat_array < 60.) & (355. < lon_array)
idx_c[2,:,:] = eurasia_pos + eurasia_neg #Eurasia
africa_pos = (lat_array < 17.) & (-52./27.*lon_array + 7.37 < lat_array) & (52./38.*lon_array -65.1 < lat_array)
africa_neg = (lat_array < 17.) & (-52./27.*(lon_array-360) + 7.37 < lat_array)
idx_c[3,:,:] = africa_pos + africa_neg #Africa
if 'all' in continents:
idx = idx_c[0,:,:] + idx_c[1,:,:] + idx_c[2,:,:] + idx_c[3,:,:]
land_array[idx] = 1.
else:
idx = np.zeros((nlat,nlon), dtype=bool)
for cont in continents:
idx = idx + idx_c[cont_dic[cont],:,:]
land_array[idx] = 1.0
# 2) Set-up in which some or all of 'new' continents are included
elif land_mode=='continents':
idx_c = np.zeros((7,nlat,nlon), dtype=bool)
idx_c[0,:,:] = (103.-43./40.*(lon_array-180) < lat_array) & ((lon_array-180)*43./50. -51.8 < lat_array) &( lat_array < 60.) #North America
idx_c[1,:,:] = (737.-7.2*(lon_array-180) < lat_array) & ((lon_array-180)*10./7. + -212.1 < lat_array) &( lat_array < -22./45*(lon_array-180) +65.9) #South America
eurasia_pos = (23. <= lat_array) & (lat_array < 60.) & (-8. < lon_array) & ( 43./40.*lon_array -101.25 < lat_array)
eurasia_neg = (23. <= lat_array) & (lat_array < 60.) & (352. < lon_array)
idx_c[2,:,:] = eurasia_pos + eurasia_neg #Eurasia
africa_pos = (lat_array < 23.) & (-52./27.*lon_array + 7.59 < lat_array) & (52./38.*lon_array -65.1 < lat_array)
africa_neg = (lat_array < 23.) & (-52./27.*(lon_array-360) + 7.59 < lat_array)
idx_c[3,:,:] = africa_pos + africa_neg #Africa
idx_c[4,:,:] = (lat_array > - 35.) & (lat_array < -17.) & (lon_array > 115.) & (lon_array < 150.) #Australia
idx_c[5,:,:] = (lat_array < 23.) & (-15./8.*lon_array + 152 < lat_array) & (15./13.*lon_array - 81 < lat_array) #India
idx_c[6,:,:] = (lat_array < 23.) & ( 43./40.*lon_array -101.25 < lat_array) & (-14./13.*lon_array +120 < lat_array) #South East Asia
if 'all' in continents:
idx = idx_c[0,:,:] + idx_c[1,:,:] + idx_c[2,:,:] + idx_c[3,:,:] + idx_c[4,:,:] + idx_c[5,:,:] + idx_c[6,:,:]
land_array[idx] = 1.
else:
idx = np.zeros((nlat,nlon), dtype=bool)
for cont in continents:
idx = idx + idx_c[cont_dic[cont],:,:]
land_array[idx] = 1.
elif land_mode=='none':
land_array = np.zeros((nlat,nlon))
# Next produce a topography array
if topo_mode == 'none':
topo_array = np.zeros((nlat,nlon))
elif topo_mode == 'sauliere2012':
# Rockys from Sauliere 2012
h_0 = 2670.
central_lon = 247.5
central_lat = 40.
L_1 = 7.5
L_2 = 20.
gamma_1 = 42.
gamma_2 = 42.
delta_1 = ((lon_array - central_lon)*np.cos(np.radians(gamma_1)) + (lat_array - central_lat)*np.sin(np.radians(gamma_1)))/L_1
delta_2 = (-(lon_array - central_lon)*np.sin(np.radians(gamma_2)) + (lat_array - central_lat)*np.cos(np.radians(gamma_2)))/L_2
h_arr_rockys = h_0 * np.exp(-(delta_1**2. + delta_2**2.))
        idx_rockys = (h_arr_rockys / h_0 > 0.05) # make sure exponentials are cut at some point - use the value from p70 of Brayshaw's thesis.
#Tibet from Sauliere 2012
h_0 = 5700.
central_lon = 82.5
central_lat = 28
L_1 = 12.5
L_2 = 12.5
gamma_1 = -49.5
gamma_2 = -18.
delta_1 = ((lon_array - central_lon)*np.cos(np.radians(gamma_1)) + (lat_array - central_lat)*np.sin(np.radians(gamma_1)))/L_1
delta_2 = (-(lon_array - central_lon)*np.sin(np.radians(gamma_2)) + (lat_array - central_lat)*np.cos(np.radians(gamma_2)))/L_2
h_arr_tibet_no_amp = np.exp(-(delta_1**2.))*(1./delta_2)*np.exp(-0.5*(np.log(delta_2))**2.)
maxval = np.nanmax(h_arr_tibet_no_amp) #For some reason my maximum value of h_arr_tibet_no_amp > 1. Renormalise so h_0 sets amplitude.
h_arr_tibet = (h_arr_tibet_no_amp/maxval)*h_0
idx_tibet = (h_arr_tibet / h_0 > 0.05)
if 'all' in mountains:
topo_array[idx_rockys] = h_arr_rockys[idx_rockys]
topo_array[idx_tibet] = h_arr_tibet[idx_tibet]
elif 'rockys' in mountains:
topo_array[idx_rockys] = h_arr_rockys[idx_rockys]
elif 'tibet' in mountains:
topo_array[idx_tibet] = h_arr_tibet[idx_tibet]
else:
print('No valid mountain options detected for Sauliere 2012 topography')
elif topo_mode == 'gaussian':
#Options to define simple Gaussian Mountain
central_lat = topo_gauss[0]
central_lon = topo_gauss[1]
radius_degrees = topo_gauss[2]
std_dev = topo_gauss[3]
height = topo_gauss[4]
rsqd_array = np.sqrt((lon_array - central_lon)**2.+(lat_array - central_lat)**2.)
#generalise to ellipse - needs checking but may be useful later (RG)
#ax_rot = 1. #gradient of new x axis
#ax_rat = 2. #axis ratio a**2/b**2
#rsqd_array = np.sqrt((lon_array - central_lon + ax_rot*(lat_array - central_lat))**2.+ ax_rat*(lat_array - central_lat - ax_rot*(lon_array - central_lon))**2.)*np.cos(np.arctan(ax_rot))
#divide by factor of cos(atan(m)) to account for change in coords
idx = (rsqd_array < radius_degrees)
topo_array[idx] = height* np.exp(-(rsqd_array[idx]**2.)/(2.*std_dev**2.))
else:
print('Invalid topography option given')
if waterworld != True: #Leave flexibility to allow aquamountains!
idx = (land_array == 0.) & (topo_array != 0.)
topo_array[idx] = 0.
#Write land and topography arrays to file
topo_filename = GFDL_BASE + 'exp/' + exp + '/input/land.nc'
topo_file = Dataset(topo_filename, 'w', format='NETCDF3_CLASSIC')
lat = topo_file.createDimension('lat', nlat)
lon = topo_file.createDimension('lon', nlon)
latitudes = topo_file.createVariable('lat','f4',('lat',))
longitudes = topo_file.createVariable('lon','f4',('lon',))
topo_array_netcdf = topo_file.createVariable('zsurf','f4',('lat','lon',))
land_array_netcdf = topo_file.createVariable('land_mask','f4',('lat','lon',))
latitudes[:] = lats
longitudes[:] = lons
topo_array_netcdf[:] = topo_array
land_array_netcdf[:] = land_array
topo_file.close()
print('Output written to: ' + topo_filename)
#Show configuration on screen to allow checking
lon_0 = lons.mean()
lat_0 = lats.mean()
m = Basemap(lat_0=lat_0,lon_0=lon_0)
xi, yi = m(lon_array, lat_array)
plt.figure()
if land_mode != 'none':
m.contour(xi,yi,land_array)
if topo_mode != 'none':
cs = m.contourf(xi,yi,topo_array, cmap=plt.get_cmap('RdBu_r'))
cb = plt.colorbar(cs, shrink=0.5, extend='both')
plt.xticks(np.linspace(0,360,13))
plt.yticks(np.linspace(-90,90,7))
plt.show()
if __name__ == "__main__":
write_land('test',land_mode='continents')
| gpl-3.0 |
Winand/pandas | pandas/tests/indexing/test_partial.py | 5 | 20611 | """
test setting *parts* of objects both positionally and label based
TODO: these should be split among the indexer tests
"""
import pytest
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Panel, Index, date_range
from pandas.util import testing as tm
class TestPartialSetting(object):
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
pytest.raises(IndexError, f)
def f():
s.iat[3] = 5.
pytest.raises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
pytest.raises(IndexError, f)
def f():
df.iat[4, 2] = 5.
pytest.raises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
with catch_warnings(record=True):
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
with catch_warnings(record=True):
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
with catch_warnings(record=True):
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
with catch_warnings(record=True):
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
with catch_warnings(record=True):
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
pytest.raises(ValueError, f)
# TODO: #15657, these are left as object and not coerced
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='object')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
pytest.raises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
        # raises as nothing is in the index
pytest.raises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow not string inserts
def f():
with catch_warnings(record=True):
df.loc[100.0, :] = df.ix[0]
pytest.raises(TypeError, f)
def f():
with catch_warnings(record=True):
df.loc[100, :] = df.ix[0]
pytest.raises(TypeError, f)
def f():
with catch_warnings(record=True):
df.ix[100.0, :] = df.ix[0]
pytest.raises(TypeError, f)
def f():
with catch_warnings(record=True):
df.ix[100, :] = df.ix[0]
pytest.raises(ValueError, f)
# allow object conversion here
df = orig.copy()
with catch_warnings(record=True):
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
assert df.index.dtype == 'object'
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
pytest.raises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
pytest.raises(ValueError, f)
def f():
df.loc[:, 1] = 1
pytest.raises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
# tm.assert_frame_equal(y,expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self):
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(
dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x=[1], y=[np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
| bsd-3-clause |
mattions/TimeScales | branch_dist/import_digitezed_data.py | 1 | 4254 | # Author Michele Mattioni
# Fri Oct 23 15:41:58 BST 2009
import pylab
import numpy as np
from numpy import sin, exp
import matplotlib.pyplot as plt
from helpers.loader import Loader
class FitHandler(object):
"""Fit the data with a polynomial"""
def fit(self, data, terms):
polycoeffs = np.polyfit(data.x, data.y, terms)
poly = np.poly1d(polycoeffs)
return poly
def plot_poly(self, pfit):
plt.plot(data.x, pfit(data.x), label="poly %i" %pfit.order)
def fit_and_plot(self, data, order):
p = self.fit(data, order)
self.plot_poly(p)
return p
def plot_data(self, data):
plt.plot(data.x, data.y, 'k.', label="data")
plt.xlabel("Distance from the soma [um]")
plt.ylabel("Surface Area [um]/Dendritic Lenght [um^2]")
def integrate_till_value(self, x0, value, poly, increment,
scale_branch):
"""Integrate the polynomial from x0 to the value required
:Params:
x0: starting point for the integration
value: objective value to reach
poly: polynomial to use to calculate
:return:
x1: ending point of the integration
"""
delta = 0
x1 = x0
while value >= delta:
x1 += increment
delta = poly(x1)/scale_branch - poly(x0)/scale_branch
return (x1, delta)
def calc_spines_pos(self, cursor_list, x1_list):
"""Calculate the spines position, returning the mid point
of the interval from the two list."""
mid_points = []
for i, el in enumerate(cursor_list):
mid_point = cursor_list[i] + (x1_list[i] - cursor_list[i])/2
mid_points.append(mid_point)
return mid_points
if __name__ == "__main__":
from scipy.optimize import leastsq
data = pylab.csv2rec('spines_distribution_Wilson_1992.csv')
pfh = FitHandler()
pfh.plot_data(data)
order = 17
pfit = pfh.fit_and_plot(data, order)
plt.title("Fitting the data")
plt.legend()
plt.savefig("Fitted_data.png")
# Integrating
pInteg = pfit.integ()
plt.figure()
pfh.plot_poly(pInteg)
plt.title("Integral area")
# We get the area per number of branch (4):
scale_branch = 4
area_per_branch = pInteg(data.x)/scale_branch
plt.plot(data.x, area_per_branch, label='area branch')
plt.legend(loc=0)
plt.savefig('integral_area.png')
# Calculating the spine dimension
"""
Procedure to get this right:
- Compute the total surface from Wolf of all the spines
# 1525 spines total, 381 per branch
- Rescale the whole surface Wolf spines surface to the Wilson one
- Compute the spine equivalent surface Wilson2Wolf
- Integrate until the surface in the Wilson world match one spine surface
- take the (x_end - x_start)/2 position
- iterate
"""
spine_Wolf = 6.35 # um^2
total_number_spines = 1525
spines_per_branch = 381
total_Wolf = spines_per_branch * spine_Wolf
total_Wilson = pInteg(220)/scale_branch #Value of the integral at the last bit
# spine_Wolf : spine_Wilson = total_Wolf : total_Wilson
spine_Wilson = (spine_Wolf * total_Wilson)/ total_Wolf
increment =0.001
cursor = 0
cursor_list = []
x1_list = []
delta_list = []
print "Calculating the spines' position. It will take a bit."
while cursor <= data.x[-1]:
x1, delta = pfh.integrate_till_value(cursor, spine_Wilson, pInteg,
increment, scale_branch)
cursor_list.append(cursor)
x1_list.append(x1)
delta_list.append(delta)
cursor = x1 # Resetting the cursor to the x1
spines_pos = pfh.calc_spines_pos(cursor_list, x1_list)
plt.figure()
plt.hist(spines_pos, bins=30)
plt.title("spines distribution for branch")
#plt.savefig('spines_distribution.png')
#filename = 'spines_pos.pickle'
#l = Loader()
#l.save(spines_pos, '.', filename)
plt.show() | bsd-3-clause |
yw374cornell/e-mission-server | emission/tests/analysisTests/intakeTests/TestFilterAccuracy.py | 1 | 5036 | # Standard imports
import unittest
import datetime as pydt
import logging
import pymongo
import json
import bson.json_util as bju
import pandas as pd
# Our imports
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.pipeline_queries as epq
import emission.tests.common as etc
class TestFilterAccuracy(unittest.TestCase):
def setUp(self):
# We need to access the database directly sometimes in order to
# forcibly insert entries for the tests to pass. But we put the import
# in here to reduce the temptation to use the database directly elsewhere.
import emission.core.get_database as edb
import uuid
self.testUUID = uuid.uuid4()
self.entries = json.load(open("emission/tests/data/smoothing_data/tablet_2015-11-03"),
object_hook=bju.object_hook)
tsdb = edb.get_timeseries_db()
for entry in self.entries:
entry["user_id"] = self.testUUID
tsdb.save(entry)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
def tearDown(self):
import emission.core.get_database as edb
edb.get_timeseries_db().remove({"user_id": self.testUUID})
edb.get_pipeline_state_db().remove({"user_id": self.testUUID})
def testEmptyCallToPriorDuplicate(self):
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
# Check call to check duplicate with a zero length dataframe
entry = unfiltered_points_df.iloc[5]
self.assertEqual(eaicf.check_prior_duplicate(pd.DataFrame(), 0, entry), False)
def testEmptyCall(self):
# Check call to the entire filter accuracy with a zero length timeseries
import emission.core.get_database as edb
edb.get_timeseries_db().remove({"user_id": self.testUUID})
# We expect that this should not throw
eaicf.filter_accuracy(self.testUUID)
self.assertEqual(len(self.ts.get_data_df("background/location")), 0)
def testCheckPriorDuplicate(self):
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry = unfiltered_points_df.iloc[5]
unfiltered_appended_df = pd.DataFrame([entry] * 5).append(unfiltered_points_df).reset_index()
logging.debug("unfiltered_appended_df = %s" % unfiltered_appended_df[["fmt_time"]].head())
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 0, entry), False)
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 5, entry), True)
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_points_df, 5, entry), False)
def testConvertToFiltered(self):
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry_from_df = unfiltered_points_df.iloc[5]
entry_copy = eaicf.convert_to_filtered(self.ts.get_entry_at_ts("background/location",
"metadata.write_ts",
entry_from_df.metadata_write_ts))
self.assertNotIn("_id", entry_copy)
self.assertEquals(entry_copy["metadata"]["key"], "background/filtered_location")
def testExistingFilteredLocation(self):
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry_from_df = unfiltered_points_df.iloc[5]
self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), False)
entry_copy = self.ts.get_entry_at_ts("background/location", "metadata.write_ts",
entry_from_df.metadata_write_ts)
self.ts.insert(eaicf.convert_to_filtered(entry_copy))
self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), True)
def testFilterAccuracy(self):
unfiltered_points_df = self.ts.get_data_df("background/location", None)
self.assertEqual(len(unfiltered_points_df), 205)
pre_filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(pre_filtered_points_df), 0)
eaicf.filter_accuracy(self.testUUID)
filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(filtered_points_df), 124)
if __name__ == '__main__':
etc.configLogging()
unittest.main()
| bsd-3-clause |
mpharrigan/mixtape | msmbuilder/lumping/pcca.py | 6 | 4084 | from __future__ import print_function, division, absolute_import
import numpy as np
from ..msm import MarkovStateModel
class PCCA(MarkovStateModel):
"""Perron Cluster Cluster Analysis (PCCA) for coarse-graining (lumping)
microstates into macrostates.
Parameters
----------
n_macrostates : int
The desired number of macrostates in the lumped model.
kwargs : optional
Additional keyword arguments to be passed to MarkovStateModel. See
msmbuilder.msm.MarkovStateModel for possible options.
Notes
-----
PCCA is a subclass of MarkovStateModel. However, the MSM properties
and attributes on PCCA refer to the MICROSTATE properties--e.g.
pcca.transmat_ is the microstate transition matrix. To get the
macrostate transition matrix, you must fit a new MarkovStateModel
object on the output (assignments) of PCCA().
"""
def __init__(self, n_macrostates, pcca_tolerance=1e-5, **kwargs):
self.n_macrostates = n_macrostates
self.pcca_tolerance = pcca_tolerance
super(PCCA, self).__init__(**kwargs)
def fit(self, sequences, y=None):
"""Fit a PCCA lumping model using a sequence of cluster assignments.
Parameters
----------
sequences : list(np.ndarray(dtype='int'))
List of arrays of cluster assignments
y : None
Unused, present for sklearn compatibility only.
Returns
-------
self
"""
super(PCCA, self).fit(sequences, y=y)
self._do_lumping()
return self
def _do_lumping(self):
"""Do the PCCA lumping.
Notes
-------
1. Iterate over the eigenvectors, starting with the slowest.
2. Calculate the spread of that eigenvector within each existing
macrostate.
3. Pick the macrostate with the largest eigenvector spread.
4. Split the macrostate based on the sign of the eigenvector.
"""
# Extract non-perron eigenvectors
right_eigenvectors = self.right_eigenvectors_[:, 1:]
assert self.n_states_ > 0
microstate_mapping = np.zeros(self.n_states_, dtype=int)
def spread(x):
return x.max() - x.min()
for i in range(self.n_macrostates - 1):
v = right_eigenvectors[:, i]
all_spreads = np.array([spread(v[microstate_mapping == k])
for k in range(i + 1)])
state_to_split = np.argmax(all_spreads)
inds = ((microstate_mapping == state_to_split) &
(v >= self.pcca_tolerance))
microstate_mapping[inds] = i + 1
self.microstate_mapping_ = microstate_mapping
def partial_transform(self, sequence, mode='clip'):
trimmed_sequence = super(PCCA, self).partial_transform(sequence, mode)
if mode == 'clip':
return [self.microstate_mapping_[seq] for seq in trimmed_sequence]
elif mode == 'fill':
def nan_get(x):
try:
x = int(x)
return self.microstate_mapping_[x]
except ValueError:
return np.nan
return np.asarray([nan_get(x) for x in trimmed_sequence])
else:
raise ValueError
@classmethod
def from_msm(cls, msm, n_macrostates):
"""Create and fit lumped model from pre-existing MSM.
Parameters
----------
msm : MarkovStateModel
The input microstate msm to use.
n_macrostates : int
The number of macrostates
Returns
-------
lumper : cls
The fit PCCA(+) object.
"""
params = msm.get_params()
lumper = cls(n_macrostates, **params)
lumper.transmat_ = msm.transmat_
lumper.populations_ = msm.populations_
lumper.mapping_ = msm.mapping_
lumper.countsmat_ = msm.countsmat_
lumper.n_states_ = msm.n_states_
lumper._do_lumping()
return lumper
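# Minimal usage sketch (not part of the original module): lump a toy discrete
# trajectory over four microstates into two macrostates. Any MarkovStateModel
# keyword arguments could also be passed to PCCA.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    toy_assignments = rng.randint(4, size=2000)  # one microstate trajectory
    lumper = PCCA(n_macrostates=2)
    lumper.fit([toy_assignments])
    print(lumper.microstate_mapping_)  # microstate -> macrostate labels
    print(lumper.partial_transform(toy_assignments)[0][:10])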
| lgpl-2.1 |
boada/ICD | sandbox/lowerSN/test.py | 2 | 5244 | #!/usr/bin/env python
import pylab as pyl
from mpl_toolkits.axes_grid1 import AxesGrid
import cPickle as pickle
from colsort import colsort
def plot_uvj_vs_icd():
galaxies = pickle.load(open('galaxies.pickle','rb'))
galaxies = filter(lambda galaxy: galaxy.ICD_IH != None, galaxies)
galaxies = filter(lambda galaxy: galaxy.sersic != None and \
galaxy.ston_I > 30, galaxies)
#Upper and Lower limit arrow verts
arrowup_verts = [[0.,0.], [-1., -1], [0.,0.],
[0.,-2.], [0.,0.], [1,-1]]
#arrowdown_verts = [[0.,0.], [-1., 1], [0.,0.],
# [0.,2.], [0.,0.], [1, 1]]
F = pyl.figure(1,figsize=(8,3))
grid = AxesGrid(F, 111,
nrows_ncols=(1,4),
axes_pad = 0.1,
add_all=True,
aspect=False,
share_all = True)
ax1 = grid[0]
ax2 = grid[1]
ax3 = grid[2]
ax4 = grid[3]
for galaxy in galaxies:
if galaxy.sersic < 1.:
col1 =ax1.scatter(galaxy.Mass, galaxy.Color_grad,
s=25, c='0.8', edgecolor='0.8')
if 1. < galaxy.sersic < 2.:
col2 =ax2.scatter(galaxy.Mass, galaxy.Color_grad,
s=25, c='0.8', edgecolor='0.8')
if 2. < galaxy.sersic < 3.:
col3 =ax3.scatter(galaxy.Mass, galaxy.Color_grad,
s=25, c='0.8', edgecolor='0.8')
if 3. < galaxy.sersic:
if galaxy.Color_grad < 50:
col4 =ax4.scatter(galaxy.Mass, galaxy.Color_grad,
s=25, c='0.8', edgecolor='0.8')
else:
col4 = ax4.scatter(galaxy.Mass, 50, marker=None, s=100,
verts=arrowup_verts)
# Add the box and whiskers
galaxies1 = filter(lambda galaxy: galaxy.ston_I > 30. and \
galaxy.sersic < 1, galaxies)
galaxies1 = pyl.asarray(galaxies1)
galaxies2 = filter(lambda galaxy: galaxy.ston_I > 30. and \
1 < galaxy.sersic < 2, galaxies)
galaxies2 = pyl.asarray(galaxies2)
galaxies3 = filter(lambda galaxy: galaxy.ston_I > 30. and \
2 < galaxy.sersic < 3, galaxies)
galaxies3 = pyl.asarray(galaxies3)
galaxies4 = filter(lambda galaxy: galaxy.ston_I > 30. and \
3 < galaxy.sersic, galaxies)
galaxies4 = pyl.asarray(galaxies4)
    # use arrays so the elementwise comparisons with the mass bins below work
    x1 = pyl.asarray([galaxy.Mass for galaxy in galaxies1])
    x2 = pyl.asarray([galaxy.Mass for galaxy in galaxies2])
    x3 = pyl.asarray([galaxy.Mass for galaxy in galaxies3])
    x4 = pyl.asarray([galaxy.Mass for galaxy in galaxies4])
ll = 8.5
ul= 12
bins_x =pyl.arange(8.5, 12.5, 0.5)
grid1 = []
grid2 = []
grid3 = []
grid4 = []
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
cond=[cond1 and cond2 for cond1, cond2 in zip(x1>=xmin, x1<xmax)]
grid1.append(galaxies1.compress(cond))
cond=[cond1 and cond2 for cond1, cond2 in zip(x2>=xmin, x2<xmax)]
grid2.append(galaxies2.compress(cond))
cond=[cond1 and cond2 for cond1, cond2 in zip(x3>=xmin, x3<xmax)]
grid3.append(galaxies3.compress(cond))
cond=[cond1 and cond2 for cond1, cond2 in zip(x4>=xmin, x4<xmax)]
grid4.append(galaxies4.compress(cond))
icd1 = []
icd2 = []
icd3 = []
icd4 = []
for i in range(len(grid1)):
icd1.append([galaxy.Color_grad for galaxy in grid1[i]])
icd2.append([galaxy.Color_grad for galaxy in grid2[i]])
icd3.append([galaxy.Color_grad for galaxy in grid3[i]])
icd4.append([galaxy.Color_grad for galaxy in grid4[i]])
from boxplot_percentile import percentile_box_plot as pbp
bp1 = pbp(ax1, icd1, indexer=list(pyl.delete(bins_x,-1)+0.25))
bp2 = pbp(ax2, icd2, indexer=list(pyl.delete(bins_x,-1)+0.25))
bp3 = pbp(ax3, icd3, indexer=list(pyl.delete(bins_x,-1)+0.25))
bp4 = pbp(ax4, icd4, indexer=list(pyl.delete(bins_x,-1)+0.25))
ax1.set_xticks([8, 9, 10, 11])
ax2.set_xticks([8, 9, 10, 11])
ax3.set_xticks([8, 9, 10, 11])
ax4.set_xticks([8, 9, 10, 11])
# ax1.set_xlim(8, 12)
# ax2.set_xlim(8, 12)
# ax3.set_xlim(8, 12)
# ax4.set_xlim(8, 12)
# ax1.set_ylim(0, 50)
# ax2.set_ylim(0, 50)
# ax3.set_ylim(0, 50)
# ax4.set_ylim(0, 50)
ax1.set_ylabel(r'$\xi[i_{775},H_{160}]$ (%)')
ax1.set_title('n < 1')
ax2.set_title('1 < n < 2')
ax3.set_title('2 < n < 3')
ax4.set_title('3 < n')
pyl.figtext(.5, .05, r'Log Mass $(M_{\odot})$',fontsize=18,
horizontalalignment='center')
ax1.axhline(0, lw=2, zorder=0)
ax2.axhline(0, lw=2, zorder=0)
ax3.axhline(0, lw=2, zorder=0)
ax4.axhline(0, lw=2, zorder=0)
import matplotlib.font_manager
line1 = pyl.Line2D([], [], marker='o', mfc='0.8', mec='0.8', markersize=8,
linewidth=0)
line2 = pyl.Line2D([], [], marker='s', mec='blue', mfc='None',
markersize=10, linewidth=0, markeredgewidth=2)
line3 = pyl.Line2D([], [], color='r', linewidth=2)
prop = matplotlib.font_manager.FontProperties(size='small')
ax3.legend((line1, line2, line3), ('Data', 'Quartiles',
'Medians'), 'upper center', prop=prop, ncol=1)
pyl.tight_layout()
pyl.subplots_adjust(bottom=0.21, left=0.11)
pyl.show()
if __name__ =='__main__':
plot_uvj_vs_icd()
| mit |
interrogator/corpkit | corpkit/tokenise.py | 1 | 6993 | from __future__ import print_function
"""
Tokenise, POS tag and lemmatise a corpus, returning CONLL-U data
"""
def nested_list_to_pandas(toks):
"""
Turn sent/word tokens into Series
"""
import pandas as pd
index = []
words = []
for si, sent in enumerate(toks, start=1):
for wi, w in enumerate(sent, start=1):
index.append((si, wi))
words.append(w)
ix = pd.MultiIndex.from_tuples(index)
ser = pd.Series(words, index=ix)
ser.name = 'w'
return ser
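# For example (illustrative only), nested_list_to_pandas([['A', 'dog'], ['Hi']])
# returns a Series named 'w' with a (sentence, token) MultiIndex:
#   (1, 1) -> 'A', (1, 2) -> 'dog', (2, 1) -> 'Hi'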
def pos_tag_series(ser, tagger):
"""
Create a POS tag Series from token Series
"""
import nltk
import pandas as pd
tags = [i[-1] for i in nltk.pos_tag(ser.values)]
tagser = pd.Series(tags, index=ser.index)
tagser.name = 'p'
return tagser
def lemmatise_series(words, postags, lemmatiser):
"""
Create a lemma Series from token+postag Series
"""
import nltk
import pandas as pd
tups = zip(words.values, postags.values)
lemmata = []
tag_convert = {'j': 'a'}
for word, tag in tups:
tag = tag_convert.get(tag[0].lower(), tag[0].lower())
if tag in ['n', 'a', 'v', 'r']:
lem = lemmatiser.lemmatize(word, tag)
else:
lem = word
lemmata.append(lem)
lems = pd.Series(lemmata, index=words.index)
lems.name = 'l'
return lems
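# For example (illustrative only): ('dogs', 'NNS') is lemmatised with the 'n'
# tag to 'dog' and ('ran', 'VBD') with the 'v' tag to 'run', while words whose
# tags fall outside n/a/v/r keep their surface form.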
def write_df_to_conll(df, newf, plain=False, stripped=False,
metadata=False, speaker_segmentation=False, offsets=False):
"""
Turn a DF into CONLL-U text, and write to file, newf
"""
import os
from corpkit.constants import OPENER, PYTHON_VERSION
from corpkit.conll import get_speaker_from_offsets
outstring = ''
sent_ixs = set(df.index.labels[0])
for si in sent_ixs:
si = si + 1
#outstring += '# sent_id %d\n' % si
if metadata:
metad = get_speaker_from_offsets(stripped,
plain,
offsets[si - 1],
metadata_mode=metadata,
speaker_segmentation=speaker_segmentation)
for k, v in sorted(metad.items()):
outstring += '# %s=%s\n' % (k, v)
sent = df.loc[si]
csv = sent.to_csv(None, sep='\t', header=False)
outstring += csv + '\n'
try:
os.makedirs(os.path.dirname(newf))
except OSError:
pass
with OPENER(newf, 'w') as newf:
if PYTHON_VERSION == 2:
outstring = outstring.encode('utf-8', errors='ignore')
newf.write(outstring)
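# The file written above holds one block per sentence: optional '# key=value'
# metadata comment lines (only when metadata is requested), then one
# tab-separated row per token as produced by df.to_csv (token index followed
# by the w/l/p columns present in df), with a blank line between sentences.
# Illustrative fragment (the metadata key is a hypothetical example):
#   # speaker=A
#   1   the   the   DT
#   2   dog   dog   NN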
def new_fname(oldpath, inpath):
"""
Determine output filename
"""
import os
newf, ext = os.path.splitext(oldpath)
newf = newf + '.conll'
if '-stripped' in newf:
return newf.replace('-stripped', '-tokenised')
else:
return newf.replace(inpath, inpath + '-tokenised')
def process_meta(data, speaker_segmentation, metadata):
import re
from corpkit.constants import MAX_SPEAKERNAME_SIZE
meta_offsets = []
if metadata and speaker_segmentation:
        reg = re.compile(r'(^.{,%d}?:\s| <metadata (.*?)>)' % MAX_SPEAKERNAME_SIZE)
elif metadata and not speaker_segmentation:
reg = re.compile(r' <metadata (.*?)>')
elif not metadata and not speaker_segmentation:
reg = re.compile(r'^.{,%d}?:\s' % MAX_SPEAKERNAME_SIZE)
#splt = re.split(reg, data)
if speaker_segmentation or metadata:
for i in re.finditer(reg, data):
meta_offsets.append((i.start(), i.end()))
for s, e in reversed(meta_offsets):
data = data[:s] + data[e:]
return data, meta_offsets
def plaintext_to_conll(inpath,
postag=False,
lemmatise=False,
lang='en',
metadata=False,
outpath=False,
nltk_data_path=False,
speaker_segmentation=False):
"""
Take a plaintext corpus and sent/word tokenise.
:param inpath: The corpus to read in
:param postag: do POS tagging?
:param lemmatise: do lemmatisation?
:param lang: choose language for pos/lemmatiser (not implemented yet)
:param metadata: add metadata to conll (not implemented yet)
:param outpath: custom name for the resulting corpus
    :param speaker_segmentation: does the corpus have speaker names?
"""
import nltk
import shutil
import pandas as pd
from corpkit.process import saferead
from corpkit.build import get_filepaths
fps = get_filepaths(inpath, 'txt')
# IN THE SECTIONS BELOW, WE COULD ADD MULTILINGUAL
# ANNOTATORS, PROVIDED THEY BEHAVE AS THE NLTK ONES DO
# SENT TOKENISERS
from nltk.tokenize.punkt import PunktSentenceTokenizer
stoker = PunktSentenceTokenizer()
s_tokers = {'en': stoker}
sent_tokenizer = s_tokers.get(lang, stoker)
# WORD TOKENISERS
tokenisers = {'en': nltk.word_tokenize}
tokeniser = tokenisers.get(lang, nltk.word_tokenize)
# LEMMATISERS
if lemmatise:
from nltk.stem.wordnet import WordNetLemmatizer
lmtzr = WordNetLemmatizer()
lemmatisers = {'en': lmtzr}
lemmatiser = lemmatisers.get(lang, lmtzr)
# POS TAGGERS
if postag:
# nltk.download('averaged_perceptron_tagger')
postaggers = {'en': nltk.pos_tag}
tagger = postaggers.get(lang, nltk.pos_tag)
# iterate over files, make df of each, convert this
# to conll and sent to new filename
for f in fps:
for_df = []
data, enc = saferead(f)
plain, enc = saferead(f.replace('-stripped', ''))
#orig_data = data
#data, offsets = process_meta(data, speaker_segmentation, metadata)
#nest = []
sents = sent_tokenizer.tokenize(data)
soffs = sent_tokenizer.span_tokenize(data)
toks = [tokeniser(sent) for sent in sents]
ser = nested_list_to_pandas(toks)
for_df.append(ser)
if postag or lemmatise:
postags = pos_tag_series(ser, tagger)
if lemmatise:
lemma = lemmatise_series(ser, postags, lemmatiser)
for_df.append(lemma)
for_df.append(postags)
else:
if postag:
for_df.append(postags)
df = pd.concat(for_df, axis=1)
fo = new_fname(f, inpath)
write_df_to_conll(df, fo,
metadata=metadata,
plain=plain,
stripped=data,
speaker_segmentation=speaker_segmentation,
offsets=soffs)
nsent = len(set(df.index.labels[0]))
print('%s created (%d sentences)' % (fo, nsent))
if '-stripped' in inpath:
return inpath.replace('-stripped', '-tokenised')
else:
return inpath + '-tokenised'
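# Hedged usage sketch, not part of the original corpkit module: the corpus
# path below is a made-up example.  It shows how plaintext_to_conll() might
# be driven to produce a '-tokenised' CONLL-U corpus with POS tags and lemmata.
if __name__ == '__main__':
    # tokenise, tag and lemmatise an (assumed) plaintext corpus directory
    out_corpus = plaintext_to_conll('data/example-corpus', postag=True,
                                    lemmatise=True, speaker_segmentation=False)
    print('Tokenised corpus written to %s' % out_corpus)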
| mit |
JT5D/scikit-learn | examples/mixture/plot_gmm_selection.py | 8 | 3193 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import pylab as pl
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = pl.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(pl.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
pl.xticks(n_components_range)
pl.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
pl.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
pl.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = pl.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
pl.xlim(-10, 10)
pl.ylim(-3, 6)
pl.xticks(())
pl.yticks(())
pl.title('Selected GMM: full model, 2 components')
pl.subplots_adjust(hspace=.35, bottom=.02)
pl.show()
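# Hedged addition (not in the original example): the docstring notes that AIC
# also selects the right model here.  A minimal sketch of that check refits the
# same candidate models and scores them with GMM.aic() instead of GMM.bic().
aic = []
for cv_type in cv_types:
    for n_components in n_components_range:
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
        gmm.fit(X)
        aic.append(gmm.aic(X))
aic = np.array(aic)
best = aic.argmin()
print('Lowest AIC: %s covariance, %d components'
      % (cv_types[best // len(n_components_range)],
         n_components_range[best % len(n_components_range)]))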
| bsd-3-clause |
limeng12/seqan | apps/ngs_roi/tool_shed/roi_report.py | 17 | 3890 | #!/usr/bin/env python
"""Create ROI overview report.
This report consists of plots of all metrics (y: metric, x: rank of value).
Each plot is written out as a PNG file and we also create one output HTML file
that displays all of the plots.
Plotting is done using the fine matplotlib.
"""
from __future__ import print_function
__author__ = 'Manuel Holtgrewe <[email protected]>'
__copyright__ = 'Copyright 2013, Freie Universitaet Berlin'
__license__ = 'BSD 3-clause'
import sys
import os
import os.path
try:
import argparse
except ImportError:
import argparse26 as argparse
import Cheetah.Template
import matplotlib.pyplot as plt
import ngs_roi.io
import ngs_roi.app
import ngs_roi.argparse
# The HTML template to use for generating the HTML page.
REPORT_TPL = """
<html>
<head>
<title>ROI Report</title>
</head>
<body>
<h1>ROI Report</h1>
<h2>Table of Contents</h2>
<ul>
#for figure in $figures
<li><a href="#$figure.slug">$figure.title</a></li>
#end for
</ul>
<h2>Plots</h2>
#for figure in $figures
<h3 id="$figure.slug">$figure.title</h3>
<img src="$figure.file_name" title="$figure.title" />
#end for
</body>
</html>
"""
class ReportBuilder(ngs_roi.app.App):
"""This class is used for building the report."""
def __init__(self, args):
self.args = args
self.in_file = self.args.in_file
self.out_file = self.args.out_file
self.out_dir = self.args.out_dir
self.prepareOutDir()
def plotAndWrite(self, file_name, numbers, ylabel):
"""Create plot of numbers and write as PNG.
        :param file_name: path to write plot to as image
        :param numbers: list of numbers to plot
        :param ylabel: label for y axis
"""
plt.figure()
plt.plot(numbers)
plt.ylabel(ylabel)
plt.savefig(file_name)
def run(self):
# Load ROI.
print('Loading ROI...', file=sys.stderr)
records = ngs_roi.io.load(self.in_file)
keys = records[0].data_keys
# Create ROI plots.
print('Creating plots...', file=sys.stderr)
METRICS = [('start position', 'start_pos', lambda x: x.start_pos),
('end position', 'end_pos', lambda x: x.end_pos),
('region length', 'region_length', lambda x: x.region_length),
('max count', 'max_count', lambda x: x.max_count)]
def getData(i):
def func(x):
try:
res = float(x.data[i])
except ValueError:
res = 0
return res
return func
for i, key in enumerate(keys):
slug = ''.join(x for x in key if x.isalnum())
METRICS.append((key, slug, getData(i)))
figure_infos = []
for title, slug, func in METRICS:
values = [func(x) for x in records]
file_name = 'report_%s.png' % slug
file_path = os.path.join(self.out_dir, file_name)
self.plotAndWrite(file_path, sorted(values), title)
figure_infos.append({'file_name': file_name, 'title': title, 'slug': slug})
# Create report HTML.
name_space = {'figures': figure_infos}
t = Cheetah.Template.Template(REPORT_TPL, searchList=name_space)
with open(os.path.join(self.out_dir, 'index.html'), 'wb') as f:
f.write(str(t))
with open(os.path.join(self.out_file), 'wb') as f:
f.write(str(t))
def main():
"""Program entry point."""
parser = argparse.ArgumentParser(description='Create ROI report.')
ngs_roi.argparse.addFileArguments(parser)
args = parser.parse_args()
ngs_roi.argparse.applyFileDefaults(args)
report_builder = ReportBuilder(args)
return report_builder.run()
if __name__ == '__main__':
sys.exit(main()) | bsd-3-clause |
cybernet14/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
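# Illustrative sketch, not part of scikit-learn: a tiny numeric example of the
# weighted soft-vote rule used in _predict_proba above.  The probabilities and
# weights are made up; the predicted class is the argmax of the weighted mean.
if __name__ == '__main__':
    probas = np.asarray([[[0.9, 0.1]],   # classifier 1, one sample
                         [[0.4, 0.6]],   # classifier 2
                         [[0.3, 0.7]]])  # classifier 3
    avg = np.average(probas, axis=0, weights=[2, 1, 1])
    print("weighted average probabilities: %s" % avg)
    print("predicted class index: %s" % np.argmax(avg, axis=1))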
| bsd-3-clause |
Titan-C/scikit-learn | sklearn/semi_supervised/label_propagation.py | 7 | 16693 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
elif callable(self.kernel):
if y is None:
return self.kernel(X, X)
else:
return self.kernel(X, y)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" or an explicit function "
" are supported at this time." % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = sparse.csgraph.laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
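# Illustrative sketch, not part of scikit-learn: a hand-rolled run of the
# clamped propagation update described in the module docstring, on a made-up
# four-sample chain where samples 0 and 3 carry opposite labels.
if __name__ == '__main__':
    graph = np.array([[0.0, 1.0, 0.0, 0.0],
                      [0.5, 0.0, 0.5, 0.0],
                      [0.0, 0.5, 0.0, 0.5],
                      [0.0, 0.0, 1.0, 0.0]])    # row-normalised affinities
    y_static = np.array([[1., 0.],              # labelled as class 0
                         [0., 0.],              # unlabelled
                         [0., 0.],              # unlabelled
                         [0., 1.]])             # labelled as class 1
    clamp = np.array([[0.], [1.], [1.], [0.]])  # hard clamp on labelled rows
    dist = y_static.copy()
    for _ in range(50):
        dist = clamp * graph.dot(dist) + y_static
    dist /= dist.sum(axis=1)[:, np.newaxis]
    print("label distributions after propagation:\n%s" % dist)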
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
hksonngan/astir.firework | utils/image.py | 1 | 16456 | #!/usr/bin/env python
#
# This file is part of FIREwork
#
# FIREwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIREwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FIREwork. If not, see <http://www.gnu.org/licenses/>.
#
# FIREwork Copyright (C) 2008 - 2011 Julien Bert
# ==== Image ================================
# ===========================================
# open image
def image_open(name):
from PIL import Image
from sys import exit
from os.path import splitext
import numpy
filename, ext = splitext(name)
if ext == '.png' or ext == '.tif' or ext == '.bmp' or ext == '.jpg':
im = Image.open(name)
w, h = im.size
mode = im.mode
if mode == 'RGB' or mode == 'RGBA' or mode == 'LA':
im = im.convert('L')
data = numpy.fromstring(im.tostring(), 'uint8')
data = data.reshape((h, w))
data = image_int2float(data)
elif ext == '.im':
f = open(name, 'rb')
data = numpy.fromfile(f, 'float32')
f.close()
ny = data[0]
nx = data[1]
data = data[2:]
data = data.reshape(ny, nx)
return data
# export image
def image_write(im, name):
from PIL import Image
from os.path import splitext
from numpy import array
filename, ext = splitext(name)
slice = im.copy()
if ext == '.png' or ext == '.tif' or ext == '.bmp' or ext == '.jpg':
slice -= slice.min()
slice /= slice.max()
ny, nx = slice.shape
slice = slice * 255
slice = slice.astype('uint8')
pilImage = Image.frombuffer('L', (nx, ny), slice, 'raw', 'L', 0, 1)
pilImage.save(name)
elif ext == '.im':
# FIREwork image format
ny, nx = slice.shape
slice = slice.reshape((ny*nx))
slice = slice.tolist()
slice.insert(0, nx)
slice.insert(0, ny)
slice = array(slice, 'float32')
slice.tofile(name)
# export image with map color
def image_write_mapcolor(im, name, color='jet'):
from numpy import array, zeros, take, ones
from PIL import Image
ny, nx = im.shape
npix = ny * nx
map = im.copy()
map = map.astype('float32')
map -= map.min()
map /= map.max()
map *= 255
map = map.astype('uint8')
map = map.reshape(npix)
lutr = zeros((256), 'uint8')
lutg = zeros((256), 'uint8')
lutb = zeros((256), 'uint8')
if color == 'jet':
up = array(range(0, 255, 3), 'uint8')
dw = array(range(255, 0, -3), 'uint8')
stp = 85
lutr[stp:2*stp] = up
lutr[2*stp:] = 255
lutg[0:stp] = up
lutg[stp:2*stp] = 255
lutg[2*stp:3*stp] = dw
lutb[0:stp] = 255
lutb[stp:2*stp] = dw
elif color == 'hot':
up = array(range(0, 255, 3), 'uint8')
stp = 85
lutr[0:stp] = up
lutr[stp:] = 255
lutg[stp:2*stp] = up
lutg[2*stp:] = 255
lutb[2*stp:3*stp] = up
lutb[3*stp:] = 255
elif color == 'pet':
up2 = array(range(0, 255, 4), 'uint8') # 64
up3 = array(range(0, 255, 8), 'uint8') # 32
dw = array(range(255, 0, -8), 'uint8') # 32
lutr[0:64] = 0
lutg[0:64] = 0
lutb[0:64] = up2
lutr[64:128] = up2
lutg[64:128] = 0
lutb[64:128] = 255
lutr[128:160] = 255
lutg[128:160] = 0
lutb[128:160] = dw
lutr[160:224] = 255
lutg[160:224] = up2
lutb[160:224] = 0
lutr[224:256] = 255
lutg[224:256] = 255
lutb[224:256] = up3
else: # hsv kind default
up = array(range(0, 255, 5), 'uint8')
dw = array(range(255, 0, -5), 'uint8')
stp = 51
lutr[0:stp] = dw
lutr[3*stp:4*stp] = up
lutr[4*stp:] = 255
lutg[0:2*stp] = 255
lutg[2*stp:3*stp] = dw
lutb[stp:2*stp] = up
lutb[2*stp:4*stp] = 255
lutb[4*stp:5*stp] = dw
matr = take(lutr, map)
matg = take(lutg, map)
matb = take(lutb, map)
mata = ones((npix), 'uint8') * 255
newim = zeros((npix*4), 'uint8')
newim[0:4*npix:4] = matr
newim[1:4*npix:4] = matg
newim[2:4*npix:4] = matb
newim[3:4*npix:4] = mata
pilImage = Image.frombuffer('RGBA', (nx, ny), newim, 'raw', 'RGBA', 0, 1)
pilImage.save(name)
# get the 1D projection of an image
def image_projection(im, axe = 'x'):
if axe == 'y': return im.sum(axis = 1)
elif axe == 'x': return im.sum(axis = 0)
# get the 1D slice of an image
def image_slice(im, x1, y1, x2, y2):
from numpy import array
# line based on DDA algorithm
length = 0
length = abs(x2 - x1)
if abs(y2 - y1) > length: length = abs(y2 - y1)
xinc = float(x2 - x1) / float(length)
yinc = float(y2 - y1) / float(length)
x = x1 + 0.5
y = y1 + 0.5
vec = []
for i in xrange(length):
vec.append(im[int(y), int(x)])
x += xinc
y += yinc
return array(vec, 'float32')
# some info to images
def image_infos(im):
sh = im.shape
print 'size: %ix%i min %f max %f mean %f std %f' % (sh[0], sh[1], im.min(), im.max(), im.mean(), im.std())
# normalize image ave=0, std=1
def image_normalize(im):
ave = im.mean()
std = im.std()
im = (im - ave) / std
return im
# convert int to float
def image_int2float(im):
im = im.astype('float32')
im /= 255.0
return im
# compute fft of image
def image_fft(im):
from numpy import fft
l, w = im.shape
if l != w:
print 'Image must be square!'
return -1
imf = fft.fft2(im)
imf = fft.fftshift(imf)
return imf
# compute ifft of image
def image_ifft(imf):
from numpy import fft
l, w = imf.shape
if l!= w:
print 'Image must be square!'
return -1
im = fft.ifft2(imf)
im = abs(im)
im = im.astype('float32')
return im
# compute power spectrum of image
def image_pows(im):
imf = image_fft(im)
imf = imf * imf.conj()
imf = imf.real
l, w = im.shape
imf /= (float(l * w))
return imf
# compute dB values
def image_atodB(im):
from numpy import log10
return 20 * log10(1+im)
# compute logscale image
def image_logscale(im):
from numpy import log10
return log10(1 + im)
# compute periodogram
def image_periodogram(im):
imf = image_fft(im)
imf = abs(imf)**2
l, w = im.shape
imf /= float(l * w)
return imf
# create noise (gauss or poisson model)
def image_noise(ny, nx, sigma, model='gauss'):
from numpy import zeros
from random import gauss
if model=='gauss':
im = zeros((ny, nx), 'float32')
mu = 0.0
for i in xrange(ny):
for j in xrange(nx):
im[i, j] = gauss(mu, sigma)
return im
elif model=='poisson':
from random import random
from math import log
im = zeros((ny, nx), 'float32')
for i in xrange(ny):
for j in xrange(nx):
im[i, j] = sigma * -log(random())
return im
# Compute RAPS, radial averaging power spectrum
def image_raps(im):
from numpy import array
lo, wo = im.shape
im = image_pows(im)
l, w = im.shape
c = (w - 1) // 2
val = image_ra(im)
freq = range(0, c + 1) # TODO should be wo // 2 + 1 coefficient need to fix!!
freq = array(freq, 'float32')
freq /= float(wo)
return val, freq
# Compute RA, radial average of image
def image_ra(im):
from numpy import zeros
l, w = im.shape
c = (w - 1) // 2
rmax = c
val = zeros((rmax + 1), 'float32')
ct = zeros((rmax + 1), 'float32')
val[0] = im[c, c] # central value
ct[0] = 1.0
for i in xrange(c - rmax, c + rmax + 1):
di = i - c
if abs(di) > rmax: continue
for j in xrange(c - rmax, c + rmax + 1):
dj = j - c
r = (di*di + dj*dj)**(0.5)
ir = int(r)
if ir > rmax: continue
cir = ir + 1
frac = r - ir
cfrac = 1 - frac
val[ir] += (cfrac * im[j, i])
ct[ir] += cfrac
if cir <= rmax:
val[cir] += (frac * im[j, i])
ct[cir] += frac
'''
val[ir] += (cfrac * im[j, i])
if cfrac != 0.0: ct[ir] += cfrac
if cir <= rmax:
val[cir] += (frac * im[j, i])
if frac != 0.0: ct[cir] += frac
'''
val /= ct
return val
# Compute FRC curve (Fourier Ring Correlation)
def image_frc(im1, im2):
from numpy import zeros, array
if im1.shape != im2.shape:
print 'Images must have the same size!'
return -1
wo, ho = im1.shape
#im1 = image_normalize(im1)
#im2 = image_normalize(im2)
imf1 = image_fft(im1)
imf2 = image_fft(im2)
imf2c = imf2.conj()
w, h = imf1.shape
c = (w - 1) // 2
rmax = c
fsc = zeros((rmax + 1), 'float32')
nf1 = zeros((rmax + 1), 'float32')
nf2 = zeros((rmax + 1), 'float32')
# center
fsc[0] = abs(imf1[c, c] * imf2c[c, c])
nf1[0] = abs(imf1[c, c])**2
nf2[0] = abs(imf2[c, c])**2
# over all rings
for i in xrange(c-rmax, c+rmax+1):
for j in xrange(c-rmax, c+rmax+1):
r = ((i-c)*(i-c) + (j-c)*(j-c))**(0.5)
ir = int(r)
if ir > rmax: continue
cir = ir + 1
frac = r - ir
cfrac = 1 - frac
ifsc = imf1[i, j] * imf2c[i, j]
inf1 = abs(imf1[i, j])**2
inf2 = abs(imf2[i, j])**2
fsc[ir] += (cfrac * ifsc)
nf1[ir] += (cfrac * inf1)
nf2[ir] += (cfrac * inf2)
if cir <= rmax:
fsc[cir] += (frac * ifsc)
nf1[cir] += (frac * inf1)
nf2[cir] += (frac * inf2)
fsc = fsc / (nf1 * nf2)**0.5
freq = range(rmax + 1) # TODO should be 0, wo // 2 + 1) need to fix!!
freq = array(freq, 'float32')
freq /= float(wo)
return fsc, freq
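# Hedged usage sketch (not part of the original FIREwork module): the image
# size and noise level below are arbitrary example values, showing how
# image_frc could compare a clean image against a noisy copy of itself.
def image_frc_example():
    from numpy import random
    im1 = random.rand(64, 64).astype('float32')
    im2 = im1 + image_noise(64, 64, 0.5, model='gauss')
    fsc, freq = image_frc(im1, im2)
    for f, v in zip(freq, fsc):
        print 'freq %5.3f  FRC %5.3f' % (f, v)
    return fsc, freq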
# rotate image with 90 deg
def image_rot90(im):
from numpy import rot90
return rot90(im)
# flip left to rigth an image
def image_flip_lr(im):
from numpy import fliplr
return fliplr(im)
# flip up to down an image
def image_flip_ud(im):
from numpy import flipud
return flipud(im)
# Compute ZNCC between 2 images
def image_zncc(i1, i2):
from numpy import sqrt, sum
im1 = i1.copy()
im2 = i2.copy()
im1 -= im1.mean()
im2 -= im2.mean()
s1 = sqrt(sum(im1*im1))
s2 = sqrt(sum(im2*im2))
return sum(im1 * im2) / (s1 * s2)
# Compute ZNCC between 2 images under a mask
def image_zncc_mask(i1, i2, mask):
from numpy import sqrt, sum
im1 = i1.copy()
im2 = i2.copy()
v1 = image_pick_undermask(im1, mask)
v2 = image_pick_undermask(im2, mask)
b1 = v1 - v1.mean()
b2 = v2 - v2.mean()
s1 = sqrt(sum(b1*b1))
s2 = sqrt(sum(b2*b2))
return sum(b1 * b2) / (s1 * s2)
# Compute SNR based on ZNCC
def image_snr_from_zncc(signal, noise):
from math import sqrt
ccc = image_zncc(signal, noise)
snr = sqrt(ccc / (1 - ccc))
return snr
# Compute SNR based on ZNCC under a mask
def image_snr_from_zncc_mask(signal, noise, mask):
from math import sqrt
ccc = image_zncc_mask(signal, noise, mask)
snr = sqrt(ccc / (1 - ccc))
return snr
# Create a 2D mask circle
def image_mask_circle(ny, nx, rad):
from numpy import zeros, sqrt
cy = ny // 2
cx = nx // 2
m = zeros((ny, nx), 'float32')
for y in xrange(ny):
for x in xrange(nx):
r = ((y-cy)*(y-cy) + (x-cx)*(x-cx))**(0.5)
if r > rad: continue
m[y, x] = 1.0
return m
# Create a 2D mask square
def image_mask_square(ny, nx, c):
from numpy import zeros
cy = ny // 2
cx = nx // 2
m = zeros((ny, nx), 'float32')
for y in xrange(ny):
for x in xrange(nx):
if abs(y-cy) > c or abs(x-cx) > c: continue
m[y, x] = 1.0
return m
# Create a 2D mask with edge of a square
def image_mask_edge_square(ny, nx, c):
m1 = image_mask_square(ny, nx, c)
m2 = image_mask_square(ny, nx, max((c-1), 0))
return m1 - m2
# Create a 2D mire based on edge square
def image_mire_edge_square(ny, nx, step):
from numpy import zeros
im = zeros((ny, nx), 'float32')
n = min(ny, nx)
hn = n // 2
im[ny//2, nx//2] = 1.0
for i in xrange(step, hn, step):
im += image_mask_edge_square(ny, nx, i)
return im
# Get statistics values from a circle ROI on an image
def image_stats_ROI_circle(im, cx, cy, rad):
from numpy import array, zeros
val = []
ny, nx = im.shape
ROI = zeros((ny, nx), 'float32')
for y in xrange(ny):
for x in xrange(nx):
r = ((y-cy)*(y-cy) + (x-cx)*(x-cx))**(0.5)
if r > rad: continue
val.append(im[y, x])
ROI[y, x] = 1.0
val = array(val, 'float32')
return ROI, val.min(), val.max(), val.mean(), val.std()
# Get statiscitcs values under a specified mask
def image_stats_mask(im, mask):
from numpy import zeros
npix = mask.sum()
val = zeros((npix), 'float32')
ny, nx = mask.shape
ct = 0
for y in xrange(ny):
for x in xrange(nx):
if mask[y, x] == 1.0:
val[ct] = im[y, x]
ct += 1
return val.min(), val.max(), val.mean(), val.std(), val.sum(), npix
# Get values under a mask
def image_pick_undermask(im, mask):
from numpy import zeros
npix = mask.sum()
val = zeros((npix), 'float32')
ny, nx = mask.shape
ct = 0
for y in xrange(ny):
for x in xrange(nx):
if mask[y, x] == 1.0:
val[ct] = im[y, x]
ct += 1
return val
# Compute image centroid
def image_centroid(im):
ny, nx = im.shape
M00, M01, M10 = 0, 0, 0
for y in xrange(ny):
for x in xrange(nx):
i = im[y, x]
M00 += i
M01 += (y * i)
M10 += (x * i)
return M10 / float(M00), M01 / float(M00)
# Stitch two images in one
def image_stitch(im1, im2):
from numpy import zeros
ny1, nx1 = im1.shape
ny2, nx2 = im2.shape
res = zeros((max(ny1, ny2), nx1+nx2), 'float32')
res[0:ny1, 0:nx1] = im1
res[0:ny2, nx1:nx1+nx2] = im2
return res
# Threshold an image (up version)
def image_threshold_up(im, th, val):
from numpy import where
ind = where(im >= th)
im[ind] = val
return im
# Threshold an image (down version)
def image_threshold_down(im, th, val):
from numpy import where
ind = where(im <= th)
im[ind] = val
return im
# display an image
def image_show(mat, map='gray'):
import matplotlib.pyplot as plt
h, w = mat.shape
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.imshow(mat, interpolation='nearest', cmap=map)
ax.set_title('Viewer - FIREwork : %i x %i' % (w, h))
min = mat.min()
max = mat.max()
d = (max - min) / 9.0
lti = [i*d+min for i in xrange(10)]
txt = ['%5.3f' % lti[i] for i in xrange(10)]
cbar = fig.colorbar(cax, ticks=lti)
cbar.ax.set_yticklabels(txt)
plt.show()
# Get input values from an image
def image_ginput(im, n, map='gray'):
from numpy import array
import matplotlib.pyplot as plt
#plt.figure(figsize=(1.4, 1.4))
plt.figure(0)
plt.imshow(im, interpolation='nearest', cmap=map)
plt.colorbar()
pts = plt.ginput(n)
plt.show()
plt.close(0)
return array(pts, 'float32')
| gpl-3.0 |
davidgardenier/frbpoppy | frbpoppy/frbcatpoppy.py | 1 | 4720 | """Do things with frbcat."""
import os
import pandas as pd
from frbcat import Frbcat as PureFrbcat
from frbpoppy.misc import pprint
from frbpoppy.paths import paths
from frbpoppy.population import Population
class Frbcat(PureFrbcat):
"""
Add frbpoppy functionality to Frbcat.
Get the pandas dataframe with Frbcat().df
"""
def __init__(self, frbpoppy=True, repeat_bursts=False, mute=False,
**kwargs):
"""Initialize."""
super().__init__(self, path=paths.frbcat(), **kwargs)
# Transform the data
if frbpoppy is True:
self.frbpoppify()
self.match_surveys(interrupt=not mute)
# Just for neating up
self.df = self.df.sort_values('utc', ascending=False)
self.df = self.df.reindex(sorted(self.df.columns), axis=1)
def frbpoppify(self):
"""Prep data for frbpoppy."""
# Conversion table
convert = {'mw_dm_limit': 'dm_mw',
'width': 'w_eff',
'flux': 's_peak',
'redshift_host': 'z',
'spectral_index': 'si',
'dispersion_smearing': 't_dm',
'dm_error': 'dm_err',
'scattering_timescale': 't_scat',
'sampling_time': 't_samp'}
self.df.rename(columns=convert, inplace=True)
# Add some extra columns
self.df['fluence'] = self.df['s_peak'] * self.df['w_eff']
self.df['population'] = 'frbcat'
# Gives somewhat of an idea of the pulse width upon arrival at Earth
self.df['w_arr'] = (self.df['w_eff']**2 -
self.df['t_dm']**2 -
self.df['t_scat']**2 -
self.df['t_samp']**2)**0.5
def match_surveys(self, interrupt=True):
"""Match up frbs with surveys."""
# Merge survey names
surf = os.path.join(self.path, 'paper_survey.csv')
self._surveys = pd.read_csv(surf)
cols = ['frb_name', 'pub_description']
self.df = pd.merge(self.df, self._surveys, on=cols, how='left')
# Clean up possible unnamed columns
self.df = self.df.loc[:, ~self.df.columns.str.contains('unnamed')]
# Add single survey instruments
# Bit rough, but will work in a pinch
def cond(t):
return (self.df.telescope == t) & (self.df.survey.isnull())
self.df.at[cond('wsrt-apertif'), 'survey'] = 'wsrt-apertif'
self.df.at[cond('askap'), 'survey'] = 'askap-incoh'
self.df.at[cond('chime'), 'survey'] = 'chime'
self.df.at[cond('srt'), 'survey'] = 'srt'
self.df.at[cond('effelsberg'), 'survey'] = 'effelsberg'
self.df.at[cond('gbt'), 'survey'] = 'guppi'
self.df.at[cond('fast'), 'survey'] = 'crafts'
# Check whether any FRBs have not yet been assigned
no_surveys = self.df['survey'].isnull()
if interrupt and any(no_surveys):
cols = ['pub_description', 'frb_name']
ns_df = self.df[no_surveys].drop_duplicates(subset=cols,
keep='first')
pprint('It seems there are new FRBs!')
m = " - Frbcat doesn't know which *survey* was running when the "
m += "FRB was seen"
pprint(m)
m = " - To use these recent detections, please link the FRB to a "
m += "survey by:"
pprint(m)
pprint(' - Adding these frbs to {}'.format(surf))
for i, r in ns_df[cols].iterrows():
title, name = r
if isinstance(title, str):
title = title.replace('\n', '')
print(f'"{title}","{name}",""')
def to_pop(self, df=None):
"""
Convert to a Population object.
Please ensure self.clean() has been run first.
"""
if not isinstance(df, pd.DataFrame):
df = self.df
pop = Population()
pop.name = 'frbcat'
frbs = pop.frbs
frbs.name = df.name.values
frbs.dm = df.dm.values
frbs.dm_mw = df.dm_mw.values
frbs.gl = df.gl.values
frbs.gb = df.gb.values
frbs.ra = df.ra.values
frbs.dec = df.dec.values
frbs.z = df.z.values
frbs.t_scat = df.t_scat.values
frbs.w_eff = df.w_eff.values
frbs.si = df.si.values
frbs.snr = df.snr.values
frbs.s_peak = df.s_peak.values
frbs.fluence = df.fluence.values
# frbs.time = df.utc.values
pop.save()
return pop
if __name__ == '__main__':
import IPython
f = Frbcat()
IPython.embed()
| mit |
jereze/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
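# Hedged addition, not part of the original example: a short sketch of mapping
# an unseen document into topic space with the fitted models.  The document
# text below is made up purely for illustration.
new_doc = ["The spacecraft carried an imaging instrument to study the planet."]
print("\nNMF topic weights for a new document:\n%s"
      % nmf.transform(tfidf_vectorizer.transform(new_doc)))
print("LDA topic distribution for a new document:\n%s"
      % lda.transform(tf_vectorizer.transform(new_doc)))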
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
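def _bootstrap_oob_example():
    """Illustrative sketch only, not part of scikit-learn: shows that, for a
    given random state, the indices drawn by _generate_sample_indices and the
    out-of-bag indices from _generate_unsampled_indices together cover every
    sample.  The seed and sample count below are arbitrary example values."""
    random_state, n_samples = 0, 10
    sampled = _generate_sample_indices(random_state, n_samples)
    unsampled = _generate_unsampled_indices(random_state, n_samples)
    # every index is either drawn at least once or reported as out-of-bag
    assert set(sampled) | set(unsampled) == set(range(n_samples))
    return sampled, unsampled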
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # np.reshape is used (rather than y[:, np.newaxis]) because it
            # preserves the data contiguity, which the slicing syntax does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
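# Editor's illustrative sketch: ``apply`` and ``feature_importances_`` above
# are easiest to understand on a fitted forest. The toy data and function name
# are hypothetical; RandomForestClassifier is defined further down in this
# module and is only looked up when the function is called.
def _example_apply_and_importances():
    """Fit a tiny forest, then inspect leaf indices and feature importances."""
    X = np.array([[0.0, 1.0], [1.0, 0.0], [2.0, 1.0], [3.0, 0.0]])
    y = np.array([0, 0, 1, 1])
    forest = RandomForestClassifier(n_estimators=3, random_state=0).fit(X, y)
    leaves = forest.apply(X)                   # shape (n_samples, n_estimators)
    importances = forest.feature_importances_  # per-feature, averaged over trees
    return leaves.shape, importances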
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
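# Editor's illustrative sketch: the prediction methods above are related --
# ``predict_proba`` averages the per-tree class probabilities, ``predict``
# takes the argmax of that average, and ``predict_log_proba`` is its log.
# The function name and toy data are hypothetical; RandomForestClassifier is
# defined later in this module and resolved when the function is called.
def _example_forest_classifier_predictions():
    """Check that ``predict`` is the argmax of the averaged ``predict_proba``."""
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = RandomForestClassifier(n_estimators=5, random_state=0).fit(X, y)
    proba = clf.predict_proba(X)
    predicted = clf.classes_.take(np.argmax(proba, axis=1), axis=0)
    return np.array_equal(clf.predict(X), predicted)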
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
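# Editor's illustrative usage sketch: a minimal end-to-end fit of the class
# documented above on synthetic data, with out-of-bag scoring enabled. The
# function name and the synthetic data are hypothetical.
def _example_random_forest_classifier():
    """Fit a RandomForestClassifier and read its out-of-bag accuracy."""
    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(np.int64)
    clf = RandomForestClassifier(n_estimators=25, oob_score=True,
                                 random_state=0)
    clf.fit(X, y)
    # oob_score_ estimates generalization accuracy from the bootstrap leftovers.
    return clf.oob_score_, clf.predict(X[:5])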
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
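# Editor's illustrative usage sketch for the regressor documented above; the
# function name and synthetic data are hypothetical.
def _example_random_forest_regressor():
    """Fit a RandomForestRegressor and inspect its out-of-bag statistics."""
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(100, 3))
    y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=100)
    reg = RandomForestRegressor(n_estimators=25, oob_score=True, random_state=0)
    reg.fit(X, y)
    # Predictions are the mean of the per-tree predictions; oob_prediction_
    # holds the out-of-bag prediction for each training sample.
    return reg.predict(X[:5]), reg.oob_score_, reg.oob_prediction_.shape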
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
    estimators_ : list of ExtraTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
    .. [1] P. Geurts, D. Ernst, and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
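# Editor's illustrative usage sketch for the extra-trees classifier documented
# above; the function name and synthetic data are hypothetical.
def _example_extra_trees_classifier():
    """Fit an ExtraTreesClassifier; note bootstrap=False by default."""
    rng = np.random.RandomState(0)
    X = rng.randn(60, 4)
    y = (X[:, 0] > 0).astype(np.int64)
    # With bootstrap=False every tree sees the full training set; the
    # randomness comes from the randomised split thresholds instead.
    est = ExtraTreesClassifier(n_estimators=10, random_state=0).fit(X, y)
    return est.score(X, y)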
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
    .. [1] P. Geurts, D. Ernst, and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
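# Editor's illustrative usage sketch for the extra-trees regressor documented
# above; the function name and synthetic data are hypothetical.
def _example_extra_trees_regressor():
    """Fit an ExtraTreesRegressor on a smooth synthetic target."""
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(60, 3))
    y = np.sin(3.0 * X[:, 0])
    est = ExtraTreesRegressor(n_estimators=10, random_state=0).fit(X, y)
    return est.predict(X[:3])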
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
References
----------
    .. [1] P. Geurts, D. Ernst, and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are actually unit test checking we fail
# for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
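# Editor's illustrative usage sketch for RandomTreesEmbedding: each sample is
# coded by the leaf it reaches in every tree, so the one-hot transform has
# exactly n_estimators ones per row. The function name and data are
# hypothetical.
def _example_random_trees_embedding():
    """Transform random data into its sparse one-hot leaf coding."""
    rng = np.random.RandomState(0)
    X = rng.randn(50, 2)
    embedder = RandomTreesEmbedding(n_estimators=5, max_depth=3, random_state=0)
    X_sparse = embedder.fit_transform(X)
    # One column per reachable leaf; each row sums to n_estimators.
    return X_sparse.shape, X_sparse.sum(axis=1)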
| bsd-3-clause |
keflavich/APEX_CMZ_H2CO | shfi_otf_pipeline/make_apex_cubes.py | 1 | 135677 | import pyspeckit
from pyspeckit.spectrum.readers import read_class
from pyspeckit import cubes
import numpy as np
from astropy import wcs
from astropy import coordinates
from astropy import units as u
from astropy import constants
try:
from .progressbar import ProgressBar
except ImportError:
from astropy.utils.console import ProgressBar
from astropy.convolution import convolve, Gaussian1DKernel, Gaussian2DKernel
from sdpy import makecube
from astropy.io import fits
from astropy.stats.funcs import mad_std
from FITS_tools import cube_regrid
from FITS_tools.load_header import get_cd
from astropy.wcs import WCS
import FITS_tools
import scipy.ndimage
import scipy.linalg
import time
from astropy.time import Time
import mpl_plot_templates
import pylab as pl
import os
import errno
from astropy import log
import glob
from scipy.ndimage import filters
from scipy import signal,interpolate
import warnings
import image_tools
import spectral_cube
from spectral_cube import SpectralCube,BooleanArrayMask
import matplotlib
from lines import all_lines
import paths
matplotlib.rc_file(paths.pcpath('pubfiguresrc'))
# http://www.apex-telescope.org/heterodyne/shfi/calibration/calfactor/
# Apparently all data taken on MPI and ESO time in the first half of 2014 were
# off by 15-25%. The data need to be multiplied by these factors.
calibration_factors = {'2014-04-23:2014-06-13': 0.875, # from Sgr B2
'2014-02-01:2014-04-23': 0.78,
None: 1,
}
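# Editor's illustrative sketch (not used elsewhere in this module): one way to
# look up the calibration factor for an observation date from the date-ranged
# keys above. The function name is hypothetical; Time is already imported from
# astropy.time at the top of this file.
def _example_calibration_factor(obsdate):
    """Return the multiplicative factor for a 'YYYY-MM-DD' observation date."""
    obstime = Time(obsdate)
    for daterange, factor in calibration_factors.items():
        if daterange is None:
            continue
        start, stop = [Time(d) for d in daterange.split(':')]
        if start <= obstime <= stop:
            return factor
    # Dates outside the affected ranges need no correction.
    return calibration_factors[None]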
datasets_ao = ['O-085.F-9311A-2010_merge','E-085.B-0964A-2010_merge']
datasets_2013 = ['M-091.F-0019-2013-2013-06-08',
'M-091.F-0019-2013-2013-06-11',
'M-091.F-0019-2013-2013-06-12',
'M-091.F-0019-2013-2013-06-13']
datasets_2014 = {
'E-093.C-0144A.2014APR02/E-093.C-0144A-2014-2014-04-01': ('MAP_001',),
'E-093.C-0144A.2014APR03/E-093.C-0144A-2014-2014-04-02': ('MAP_001',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-04-24': ('MAP_115','MAP_116',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-04-30': ('MAP_116',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-02': ('MAP_116',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-04': ('MAP_115','MAP_116',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-08': ('MAP_117','MAP_118',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-09': ('MAP_119','MAP_118',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-10': ('MAP_120','MAP_121','MAP_119',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-11': ('MAP_121','MAP_122','MAP_123','MAP_124',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-12': ('MAP_055','MAP_056','MAP_124',),
'M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-13': ('MAP_031','MAP_032','MAP_057','MAP_058',),
'E-093.C-0144A.2014MAY30/E-093.C-0144A-2014-2014-05-29': ('MAP_002','MAP_003','MAP_004'),
'E-093.C-0144A.2014MAY31/E-093.C-0144A-2014-2014-05-30': ('MAP_005','MAP_006'),
'E-093.C-0144A.2014JUN01/E-093.C-0144A-2014-2014-05-31': ('MAP_007',),
'E-093.C-0144A.2014JUN02/E-093.C-0144A-2014-2014-06-01': ('MAP_009','MAP_010','MAP_008',),
'E-093.C-0144A.2014JUN03/E-093.C-0144A-2014-2014-06-02': ('MAP_011','MAP_012','MAP_013', 'MAP_018', 'MAP_019'),
'E-093.C-0144A.2014JUN06/E-093.C-0144A-2014-2014-06-05': ('Map_020', 'Map_021', 'Map_022', 'Map_023', 'Map_024', 'Map_025'),
# There is some corrupt data in 06-06
'E-093.C-0144A.2014JUN07/E-093.C-0144A-2014-2014-06-06': ('Map_001', 'Map_026', 'Map_027', 'Map_028', 'Map_029', 'Map_030'),
# should be 05-07: map117
'M-093.F-0009-2014-2014-07-10':['MAP_031', 'MAP_030'],
'M-093.F-0009-2014-2014-07-11':['MAP_029', 'MAP_030'],
'M-093.F-0009-2014-2014-07-12':['MAP_028', 'MAP_029'],
'M-093.F-0009-2014-2014-07-13':['MAP_028', 'MAP_026', 'MAP_027', 'MAP_024', 'MAP_025'],
'M-093.F-0009-2014-2014-07-14':['MAP_024','MAP_025'],
'M-093.F-0009-2014-2014-07-19':['MAP_024',],
'M-093.F-0009-2014-2014-07-20':['MAP_024',],
'E-093.C-0144A.2014JUL29/E-093.C-0144A-2014-2014-07-28': ['MAP_002', 'MAP_001'],
'E-093.C-0144A.2014JUL29/E-093.C-0144A-2014-2014-07-29': ['MAP_002',],
'E-093.C-0144A.2014JUL30/E-093.C-0144A-2014-2014-07-29': ['MAP_004', 'MAP_002', 'MAP_003'],
'E-093.C-0144A.2014JUL31/E-093.C-0144A-2014-2014-07-30': ['MAP_005', 'MAP_006'],
'E-093.C-0144A.2014AUG01/E-093.C-0144A-2014-2014-07-31': ['MAP_006', 'MAP_007', 'MAP_008', 'MAP_009', 'MAP_012', 'MAP_011', 'MAP_010'],
'E-093.C-0144A.2014AUG01/E-093.C-0144A-2014-2014-08-01': ['MAP_013',],
'E-093.C-0144A.2014AUG02/E-093.C-0144A-2014-2014-08-01': ['MAP_013', 'MAP_018'],
'E-093.C-0144A.2014AUG09/E-093.C-0144A-2014-2014-08-07': ['MAP_024', 'MAP_022', 'MAP_023', 'MAP_025'],
'E-093.C-0144A.2014AUG09/E-093.C-0144A-2014-2014-08-08': ['MAP_027', 'MAP_026'],
'E-093.C-0144A.2014OCT07/E-093.C-0144A-2014-2014-10-06':['MAP_020', 'MAP_019', 'MAP_021'],
'E-093.C-0144A.2014OCT06/E-093.C-0144A-2014-2014-10-05':['MAP_020'],
'E-093.C-0144A.2014OCT08/E-093.C-0144A-2014-2014-10-07':['MAP_056', 'MAP_022', 'MAP_031', 'MAP_023', 'MAP_019', 'MAP_032'],
'E-093.C-0144A.2014OCT09/E-093.C-0144A-2014-2014-10-08':['MAP_022', 'MAP_117', 'MAP_122', 'MAP_057', 'MAP_121'],
'E-093.C-0144A.2014OCT11/E-093.C-0144A-2014-2014-10-10':['MAP_011', 'MAP_058', 'MAP_018'],
'E-093.C-0144A.2014OCT12/E-093.C-0144A-2014-2014-10-11':['MAP_005', 'MAP_011', 'MAP_030', 'MAP_028'],
'E-093.C-0144A.2014OCT13/E-093.C-0144A-2014-2014-10-12':['MAP_001'],
'E-095.C-0242A.2015APR24/E-095.C-0242A-2015-2015-04-23':['MAP_032','MAP_123','MAP_124','MAP_118','MAP_031'],
'E-095.C-0242A.2015APR25/E-095.C-0242A-2015-2015-04-24':['MAP_123','MAP_124','MAP_118','MAP_115','MAP_055','MAP_031'],
'E-095.C-0242A.2015APR26/E-095.C-0242A-2015-2015-04-25':['MAP_115','MAP_019','MAP_116','MAP_122'],
'E-095.C-0242A.2015APR27/E-095.C-0242A-2015-2015-04-26':['MAP_056','MAP_021'],
'E-095.C-0242A.2015MAY04/E-095.C-0242A-2015-2015-05-03':['MAP_056'],
}
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-04-24 ['MAP_115', 'MAP_116']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-04-30 ['MAP_116']
#E-093.C-0144A.2014JUN01/E-093.C-0144A-2014-2014-05-31 ['MAP_007']
#E-093.C-0144A.2014APR03/E-093.C-0144A-2014-2014-04-02 ['MAP_001']
#E-093.C-0144A.2014JUN02/E-093.C-0144A-2014-2014-06-01 ['MAP_008', 'MAP_009', 'MAP_010']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-09 ['MAP_118', 'MAP_119']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-08 ['MAP_118', 'MAP_117']
#E-093.C-0144A.2014JUN03/E-093.C-0144A-2014-2014-06-02 ['MAP_013', 'MAP_012', 'MAP_011', 'MAP_019', 'MAP_018']
#E-093.C-0144A.2014MAY31/E-093.C-0144A-2014-2014-05-30 ['MAP_005', 'MAP_006']
#E-093.C-0144A.2014MAY30/E-093.C-0144A-2014-2014-05-29 ['MAP_004', 'MAP_002', 'MAP_003']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-02 ['MAP_116']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-04 ['MAP_115', 'MAP_116']
#E-093.C-0144A.2014APR02/E-093.C-0144A-2014-2014-04-01 ['MAP_001']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-10 ['MAP_119', 'MAP_121', 'MAP_120']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-11 ['MAP_123', 'MAP_122', 'MAP_121', 'MAP_124']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-12 ['MAP_124', 'MAP_056', 'MAP_055']
#M-093.F-0009-2014-2014-04/M-093.F-0009-2014-2014-05-13 ['MAP_031', 'MAP_058', 'MAP_057', 'MAP_032']
june2013datapath = '/Users/adam/work/h2co/apex/june2013/raw/M-091.F-0019-2013/'
june2013path = '/Users/adam/work/h2co/apex/june2013/'
april2014path = '/Users/adam/work/h2co/apex/april2014/'
h2copath = '/Users/adam/work/h2co/apex/h2co_cubes/'
mergepath = '/Users/adam/work/h2co/apex/merged_datasets/'
molpath = os.path.join(mergepath, 'molecule_cubes/')
aorawpath = '/Users/adam/work/h2co/apex/2010_reduced/2010_raw/'
aopath = '/Users/adam/work/h2co/apex/2010_reduced/'
diagplotdir = '/Users/adam/work/h2co/apex/diagnostic_plots/'
bright_lines = {k:all_lines[k] for k in
['H2CO_303_202', 'H2CO_322_221', 'H2CO_321_220', 'SiO_54',
'CH3OH_422_312', 'C18O', '13CO']}
bandwidths = {'H2CO_303_202':25,
'H2CO_322_221':25,
'H2CO_321_220':25,
'SiO_54':25,
'CH3OH_422_312':25,
'CH3OH_514_422':25,
'CH3OH_633_716':25,
'HCCCH_65': 25,
'OCS_18_17':25,
'CH3OCHO_17_16':25,
'C18O':75,
'13CO':75,
#'H2S 2(2,0)-2(1,1)': 216.71044, ??
}
lines218 = {x:v for x,v in all_lines.items()
if 'H2CO' in x or 'CH3OH_422_312' in x}
all_apexfiles = ([os.path.join(june2013datapath, k)+".apex"
for k in datasets_2013] +
[os.path.join(april2014path, k)+".apex"
for k in datasets_2014] +
[os.path.join(aorawpath, k)+".apex"
for k in datasets_ao]
)
def mkdir_p(path):
""" http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python """
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
        else:
            raise
def checkdir_makedir(path):
dpath = os.path.split(path)[0]
if not os.path.exists(dpath) and dpath:
mkdir_p(dpath)
def debug_and_load(test='test'):
spectra,headers,indices,data,hdrs,gal = load_dataset_for_debugging(skip_data=False, lowhigh='high')
make_blanks_freq(gal, hdrs[0], test, clobber=True)
dmeansub,gal,hdrs = process_data(data, gal, hdrs, dataset=test,
subspectralmeans=True, scanblsub=False)
add_apex_data(dmeansub, hdrs, gal, test, retfreq=True, varweight=True,)
dscube = cube_regrid.downsample_cube(fits.open(test+".fits")[0], factor=4)
dscube.writeto(test+"_ds.fits",clobber=True)
make_blanks_freq(gal, hdrs[0], test+"_blsub", clobber=True)
dspecsub,gal,hdrs = process_data(data, gal, hdrs, dataset=test+"_blsub",
subspectralmeans=True, scanblsub=True)
add_apex_data(dspecsub, hdrs, gal, test+"_blsub", retfreq=True, varweight=True,)
dscube = cube_regrid.downsample_cube(fits.open(test+"_blsub.fits")[0], factor=4)
dscube.writeto(test+"_blsub_ds.fits",clobber=True)
make_blanks_freq(gal, hdrs[0], test+"_pcasub", clobber=True)
dpcasub,gal,hdrs = process_data(data, gal, hdrs, dataset=test+"_pcasub",
subspectralmeans=True, scanblsub=True,
pca_clean=True, pcakwargs={})
add_apex_data(dpcasub, hdrs, gal, test+"_pcasub", retfreq=True, varweight=True,)
dscube = cube_regrid.downsample_cube(fits.open(test+"_pcasub.fits")[0], factor=4)
dscube.writeto(test+"_pcasub_ds.fits",clobber=True)
freq = hdr_to_freq(hdrs[0])
mask = make_line_mask(freq)
return spectra,headers,indices,data,hdrs,gal,dspecsub,dmeansub,dpcasub,freq,mask
def load_dataset_for_debugging(lowhigh='low', downsample_factor=8,
dataset='M-091.F-0019-2013-2013-06-11',
datapath=june2013datapath,
xscan=37986,
sourcename='SGRA',
shapeselect=4096,
backend='xffts',
skip_data=True):
"""
Example:
spectra,headers,indices, data,hdrs,gal = load_dataset_for_debugging(skip_data=False)
make_blanks_freq(gal, hdrs[0], 'test', clobber=True)
noise = np.std(data,axis=1)
freq_step = np.array([h['FRES'] for h in hdrs])
exptime = np.array([h['EXPOSURE'] for h in hdrs])
tsys = np.array([h['TSYS'] for h in hdrs])
diagplot(data, tsys, noise, 'test')
add_apex_data(data, hdrs, gal, cubefilename, retfreq=True, varweight=True,)
"""
if lowhigh not in ('low','high'):
        raise ValueError("lowhigh must be 'low' or 'high'")
if backend == 'xffts':
xtel = 'AP-H201-X202' if lowhigh=='low' else 'AP-H201-X201'
else:
xtel = 'AP-H201-F101' if lowhigh == 'high' else 'AP-H201-F102'
apex_filename=datapath+dataset+".apex"
spectra,headers,indices = load_apex_cube(apex_filename,
downsample_factor=downsample_factor,
xtel=xtel,
sourcename=sourcename)
data, hdrs, gal = select_apex_data(spectra, headers, indices,
sourcename=sourcename,
shapeselect=shapeselect,
tsysrange=[100,325],
xtel=xtel,
rchanrange=None,
xscan=xscan,
skip_data=skip_data)
return spectra,headers,indices, data,hdrs,gal
def get_sourcenames(headers):
return list(set([h['SOURC'].strip() for h in headers]))
def load_apex_cube(apex_filename='data/E-085.B-0964A-2010.apex',
skip_data=False, DEBUG=False, downsample_factor=None,
sourcename=None, xtel=None,
memmap=True, **kwargs):
found_data = read_class.read_class(apex_filename,
downsample_factor=downsample_factor,
sourcename=sourcename, telescope=xtel,
**kwargs)
return found_data
def select_apex_data(spectra,headers,indices, sourcename=None,
shapeselect=None, tsysrange=None, rchanrange=None,
xscan=None,
xtel=None,
skip_data=False,
dont_flag_sgrb2=True,
galactic_coordinate_range=[[-2,2],[-2,2]]):
log.info("Determining RA/Dec")
ra,dec = zip(*[(h['RA']+h['RAoff']/np.cos(h['DEC']/180.*np.pi),
h['DEC']+h['DECoff']) for h in headers])
log.info("Determining Galactic coordinates")
gal = coordinates.SkyCoord(np.array(ra)*u.deg,
np.array(dec)*u.deg,
frame='icrs').galactic
#gal.l.wrap_angle = 180*u.deg
if galactic_coordinate_range is not None:
(lmin,lmax),(bmin,bmax) = galactic_coordinate_range
galOK = ((gal.l.wrap_at(180*u.deg).deg > lmin) &
(gal.l.wrap_at(180*u.deg).deg < lmax) &
(gal.b.deg > bmin) &
(gal.b.deg < bmax))
else:
galOK = True
sourceOK = True
#if isinstance(sourcename, (list,tuple)):
# sourceOK = np.array([h['SOURC'].strip() in sourcename for h in headers])
#elif sourcename is not None:
# sourceOK = np.array([h['SOURC'].strip()==sourcename for h in headers])
#else:
# sourceOK = True
if xscan is not None:
xscanOK = np.array([h['SCAN']==xscan for h in headers])
else:
xscanOK = True
xtelOK = True
#if xtel is not None:
# xtelOK = np.array([h['XTEL'].strip()==xtel for h in headers])
#else:
# xtelOK = True
if tsysrange is not None:
tsys = np.array([h['TSYS'] for h in headers])
tsysOK = (tsys>tsysrange[0]) & (tsys<tsysrange[1])
if dont_flag_sgrb2:
sgrb2 = ((gal.l.wrap_at(180*u.deg).deg > 0.64) &
(gal.l.wrap_at(180*u.deg).deg<0.7) &
(gal.b.deg>-0.06) &
(gal.b.deg<-0.01))
tsysOK[sgrb2] = True
else:
tsysOK = True
if rchanrange is not None:
rchan = np.array([h['RCHAN'] if 'RCHAN' in h else np.inf for h in headers])
rchanOK = (rchan>rchanrange[0]) & (rchan<rchanrange[1])
else:
rchanOK = True
mostOK = galOK & sourceOK & tsysOK & rchanOK & xtelOK & xscanOK
if not skip_data:
log.info("Shaping data")
data1 = np.array(spectra)
shapes = np.array([d.shape for d in data1])
if shapeselect is not None:
OKshapes = (shapes == shapeselect).squeeze()
elif len(np.unique(shapes[mostOK])) > 1:
raise ValueError("Inconsistent shapes.")
else:
OKshapes = True
else:
OKshapes = True
allOK = mostOK & OKshapes
if allOK.sum() == 0:
raise ValueError("Data selection yielded empty. Sourcename={0}".format(sourcename))
if skip_data:
data = None
else:
data = np.array(data1[allOK].tolist())
hdrs = [h for h,K in zip(headers,allOK) if K]
gal = gal[allOK]
return data,hdrs,gal
def process_data(data, gal, hdrs, dataset, scanblsub=False,
subspectralmeans=True, verbose=False, noisefactor=3.0,
linemask=False, automask=2,
zero_edge_pixels=0,
subtract_time_average=False,
pca_clean=False,
timewise_pca=True,
pcakwargs={},
**kwargs):
timeaxis = 0
freqaxis = 1
log.info("Processing {0}".format(dataset))
if zero_edge_pixels:
# Force the Nth first/last frequency pixels to zero
data[:,:zero_edge_pixels] = 0
data[:,-zero_edge_pixels:] = 0
# flag extremely bad pixels (don't know where these come from, scary!)
extremely_bad = (data > 1e10) | (data < -1e10)
# Set to zero rather than nan to avoid masking-related issues below
data[extremely_bad] = 0
if subspectralmeans:
data = data - data.mean(axis=freqaxis)[:,None]
obsids = np.array([h['SCAN'] for h in hdrs])
# for plotting and masking, determine frequency array
freq = hdr_to_freq(hdrs[0])
scans = identify_scans_fromcoords(gal)
if scanblsub:
data_diagplot(data, dataset+"_presub", scans=scans, freq=freq,
**kwargs)
for ii,xscan in enumerate(np.unique(obsids)):
match = obsids == xscan
# maybe mask=mask_pix.max(axis=timeaxis), ?
#mask=mask_pix[ii],
data_diagplot(data[match], dataset+"_presub_obs%i" % xscan,
freq=freq, **kwargs)
if linemask:
mask = make_line_mask(freq)
else:
mask = None
dsub,mask_pix = subtract_scan_linear_fit(data, scans, mask_pixels=mask,
verbose=verbose,
automask=automask,
smooth_all=True,
return_mask=True)
if len(mask_pix) == 0:
mask = None
else:
mask = mask_pix.max(axis=timeaxis).astype('bool')
elif subtract_time_average:
# subtracting mean spectrum from all spectra
dsub = data - data.mean(axis=timeaxis)
mask = None
else:
mask = None
dsub = data
if pca_clean:
t0 = time.time()
if timewise_pca:
dsub = PCA_clean(dsub.T, smoothing_scale=False,
diagplotfilename=os.path.join(diagplotdir,
dataset+"_time_pca_diagnostic.png"),
**pcakwargs).T
else:
# DON'T remove the mean: that's dealt with in 'spectral baselining' in
# a more conservative fashion
dmean = dsub.mean(axis=0)
dsub = PCA_clean(dsub-dmean,
diagplotfilename=os.path.join(diagplotdir,
dataset+"_pca_diagnostic.png"),
**pcakwargs) + dmean
log.info("PCA cleaning took {0} seconds".format(time.time()-t0))
# Standard Deviation can be fooled by obscene outliers
#noise = MAD(dsub,axis=freqaxis)
noise = np.std(dsub,axis=freqaxis)
freq_step = np.array([h['FRES'] for h in hdrs])
exptime = np.array([h['EXPOSURE'] for h in hdrs])
tsys = np.array([h['TSYS'] for h in hdrs])
# 2 for 2 polarizations; otherwise this is Wilson 2009 eqn 4.41
theoretical_rms = tsys/(2.*np.abs(freq_step*1.0e6)*exptime)**0.5
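    # Worked example of the radiometer equation above (illustrative numbers,
    # not taken from any particular scan): for TSYS = 150 K, a channel width
    # of 0.076 MHz, and a 1 s integration,
    #   theoretical_rms = 150 / (2 * 7.6e4 * 1)**0.5 ~ 0.38 K per channel.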
    # the noisefactor (3.0 by default) guards against overflagging: only
    # spectra whose measured noise exceeds noisefactor times the theoretical
    # rms are discarded
bad = noise > (theoretical_rms*noisefactor)
# SgrB2 has higher noise. Don't flag it out.
sgrb2 = ((gal.l.wrap_at(180*u.deg).deg > 0.64) &
(gal.l.wrap_at(180*u.deg).deg<0.7) &
(gal.b.deg>-0.06) &
(gal.b.deg<-0.01))
bad[sgrb2] = False
# pre-flagging diagnostic
diagplot(dsub, tsys, noise, dataset+"_preflag", freq=freq, mask=mask,
scans=scans, **kwargs)
if np.count_nonzero(bad) == bad.size:
import ipdb; ipdb.set_trace()
raise ValueError("All data will be flagged out; something is amiss.")
    dsub = dsub[~bad]
    obsids = obsids[~bad]
    tsys = tsys[~bad]
    noise = noise[~bad]
    gal = gal[~bad]
    hdrs = [h for h,b in zip(hdrs,bad) if not b]
    log.info("Flagged out %i bad values (%0.1f%%)." % (bad.sum(),
                                                       100*bad.sum()/float(bad.size)))
diagplot(dsub, tsys, noise, dataset, freq=freq, mask=mask, scans=scans,
**kwargs)
for xscan in np.unique(obsids):
match = obsids == xscan
diagplot(dsub[match], tsys[match], noise[match],
dataset+"_obs%i" % xscan, freq=freq, mask=mask, **kwargs)
return dsub,gal,hdrs
def classheader_to_fitsheader(header, axisnumber=1):
header['CRPIX{0}'.format(axisnumber)] = header['RCHAN']
header['CRVAL{0}'.format(axisnumber)] = header['VOFF']
header['CDELT{0}'.format(axisnumber)] = header['VRES']
    header['RESTFRQ'] = header['RESTF']
header['CUNIT{0}'.format(axisnumber)] = 'km s-1'
hdr = fits.Header()
for k in header:
if k == 'DATEOBS':
hdr[k] = header[k].datetime.isoformat()
elif isinstance(header[k], (np.ndarray, list, tuple)):
for ii,val in enumerate(header[k]):
hdr[k[:7]+str(ii)] = val
else:
hdr[k[:8]] = header[k]
hdr['TREC'] = 0
#hdr.insert(axisnumber+2, ('NAXIS{0}'.format(axisnumber), header['DATALEN']))
#assert hdr.cards[3][0] == 'NAXIS1'
return hdr
def hdr_to_freq(h):
freqarr = ((np.arange(h['NCHAN'])+1-h['RCHAN']) * h['FRES'] +
h['FOFF'] + h['RESTF'])
return freqarr
def hdr_to_velo(h):
veloarr = (np.arange(h['NCHAN'])+1-h['RCHAN']) * h['VRES'] + h['VOFF']
return veloarr
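# Minimal sketch of how the two helpers above define linear spectral axes
# (header values here are made up purely for illustration):
#
#     h = {'NCHAN': 4, 'RCHAN': 1, 'FRES': 0.1, 'FOFF': 0.0,
#          'RESTF': 218222.192}
#     hdr_to_freq(h)
#     # -> [218222.192, 218222.292, 218222.392, 218222.492]  (MHz)
#
# i.e. channel RCHAN lands on FOFF + RESTF and each step is FRES.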
def add_apex_data(data, hdrs, gal, cubefilename, noisecut=np.inf,
retfreq=False, excludefitrange=None, varweight=True,
debug=False, kernel_fwhm=10./3600.):
if debug and log.level > 10:
log.level = 10
log.info("Data shape: {}. Next step is gridding.".format(data.shape))
if data.ndim != 2:
raise ValueError('Data shape is NOT ok.')
if data.shape[0] != len(hdrs):
raise ValueError('Data and headers do not match')
if data.shape[0] != len(gal):
        raise ValueError('Data and coords do not match')
def data_iterator(data=data, continuum=False, fsw=False):
shape0 = data.shape[0]
for ii in xrange(shape0):
#for ii in xrange(1000):
yield data[ii,:]
# as defined on http://www.apex-telescope.org/heterodyne/shfi/het230/lines/
linefreq = 218222.192
def velo_iterator(data=None, linefreq=linefreq, headers=hdrs):
for h in headers:
if retfreq:
freqarr = hdr_to_freq(h)
#veloarr = ((freqarr-linefreq)/linefreq * constants.c).to(u.km/u.s).value
# needs to be in hz
yield freqarr*1e6*u.Hz
else:
veloarr = hdr_to_velo(h)
yield veloarr*u.km/u.s
def coord_iterator(data=None, coordsys_out='galactic', gal=gal):
for c in gal:
yield c.l.deg, c.b.deg
nhits = cubefilename+"_nhits.fits"
flatheader = fits.getheader(nhits)
cubeheader = fits.getheader(cubefilename+".fits")
makecube.add_data_to_cube(cubefilename+".fits", data=data,
flatheader=flatheader,
cubeheader=cubeheader, linefreq=218.22219,
allow_smooth=True,
nhits=nhits,
data_iterator=data_iterator,
coord_iterator=coord_iterator,
velo_iterator=velo_iterator,
progressbar=True, coordsys='galactic',
velocity_offset=0.0, negative_mean_cut=None,
add_with_kernel=True, kernel_fwhm=kernel_fwhm,
fsw=False,
diagnostic_plot_name=None, chmod=False,
default_unit=u.GHz if retfreq else u.km/u.s,
smoothto=2,
noisecut=noisecut,
excludefitrange=None,
varweight=varweight,
continuum_prefix=None)
def add_pipeline_parameters_to_file(fileprefix, pipeline_type, **kwargs):
if not os.path.exists(fileprefix+".fits"):
return False
f = fits.open(fileprefix+".fits")
f[0].header['PIPECALL'] = (pipeline_type,'build_cube function called')
for ii,(k,v) in enumerate(kwargs.items()):
try:
kw = ('P{pipetype:_<4s}K{n:02d}'.format(n=ii,
pipetype=pipeline_type[:4])
.upper())
keypair = "{k}:{v}".format(k=k, v=v)
f[0].header[kw] = keypair
except Exception as ex:
log.warning("Header could not be updated with key/value pair"
"{k}:{v}. Error: {ex}".format(k=k, v=v, ex=ex))
f.writeto(fileprefix+".fits", clobber=True, output_verify='fix')
def add_pipeline_header_data(header):
header['PIPELINE'] = 'Ginsburg 2014 SHFI OTF Pipeline'
header['TELESCOP'] = 'APEX'
header['INSTRUME'] = 'SHFI-1'
header['PIPEDATE'] = (time.strftime("%y_%m_%d_%H:%M:%S"), 'Date pipeline was run')
from .version import version,githash
header['PIPEVERS'] = version
header['PIPEGIT'] = githash
import sdpy.version
header['SDPYVERS'] = (sdpy.version.version, 'sdpy version')
import astropy.version
header['ASTROPYV'] = (astropy.version.version,'Astropy version')
try:
import pyspeckit
header['PYSPECKV'] = pyspeckit.__version__
except (ImportError,AttributeError):
pass
import FITS_tools.version
header['FITSTOOV'] = (FITS_tools.version.version,'FITS_tools version')
import scipy.version
header['SCIPYVER'] = (scipy.version.version,'scipy version')
import numpy.version
header['NUMPYVER'] = (numpy.version.version,'numpy version')
import spectral_cube.version
header['SPCUBEVE'] = (spectral_cube.version.version,'spectral_cube version')
header['BUNIT'] = ('K', 'T_A*; ETAMB has efficiency')
header['ETAMB'] = (0.75, 'http://www.apex-telescope.org/telescope/efficiency/')
def make_blanks(gal, header, cubefilename, clobber=True, pixsize=7.2*u.arcsec):
lrange = (gal.l.wrap_at(180*u.deg).deg.min()+15/3600.,
gal.l.wrap_at(180*u.deg).deg.max()+15/3600.)
brange = gal.b.deg.min()+15/3600.,gal.b.deg.max()+15/3600.
log.info("Map extent automatically determined: "
"%0.2f < l < %0.2f, %0.2f < b < %0.2f" % (lrange[0], lrange[1],
brange[0], brange[1]))
naxis1 = (lrange[1]-lrange[0])/(pixsize.to(u.deg).value)
naxis2 = (brange[1]-brange[0])/(pixsize.to(u.deg).value)
restfreq = (header['RESTF']*u.MHz)
# beam major/minor axis are the same, gaussian for 12m telescope
# we convolved with a 10" FWHM Gaussian kernel, so we add that in quadrature
    bmaj = (1.22*restfreq.to(u.m,u.spectral())/(12*u.m))*u.radian
bmaj = (bmaj**2 + (10*u.arcsec)**2)**0.5
cubeheader, flatheader = makecube.generate_header(np.mean(lrange),
np.mean(brange),
naxis1=naxis1,
naxis2=naxis2,
naxis3=4096,
coordsys='galactic',
ctype3='VRAD',
bmaj=bmaj.to(u.deg).value,
bmin=bmaj.to(u.deg).value,
pixsize=pixsize.to(u.arcsec).value,
cunit3='km/s',
output_flatheader='header.txt',
output_cubeheader='cubeheader.txt',
cd3=header['VRES'],
crval3=-1*header['VRES']*header['RCHAN'],
crpix3=1, clobber=True,
bunit="K",
restfreq=restfreq.to(u.Hz).value,
radio=True)
add_pipeline_header_data(cubeheader)
add_pipeline_header_data(flatheader)
makecube.make_blank_images(cubefilename, cubeheader=cubeheader,
flatheader=flatheader, clobber=clobber,
dtype='float32')
def make_blanks_freq(gal, header, cubefilename, clobber=True, pixsize=7.2*u.arcsec):
""" complete freq covg """
lrange = gal.l.wrap_at(180*u.deg).deg.min()+15/3600.,gal.l.wrap_at(180*u.deg).deg.max()+15/3600.
brange = gal.b.deg.min()+15/3600.,gal.b.deg.max()+15/3600.
log.info("Map extent: %0.2f < l < %0.2f, %0.2f < b < %0.2f" % (lrange[0],
lrange[1],
brange[0],
brange[1]))
naxis1 = int((lrange[1]-lrange[0])/(pixsize.to(u.deg).value)+10)
naxis2 = int((brange[1]-brange[0])/(pixsize.to(u.deg).value)+10)
restfreq = (header['RESTF']*u.MHz)
# beam major/minor axis are the same, gaussian for 12m telescope
# we convolved with a 10" FWHM Gaussian kernel, so we add that in quadrature
    bmaj = (1.22*restfreq.to(u.m,u.spectral())/(12*u.m))*u.radian
bmaj = (bmaj**2 + (10*u.arcsec)**2)**0.5
rchan = header['RCHAN']
#scalefactor = 1./downsample_factor
#crpix3 = (rchan-1)*scalefactor+0.5+scalefactor/2.
cubeheader, flatheader = makecube.generate_header(np.mean(lrange),
np.mean(brange),
naxis1=naxis1,
naxis2=naxis2,
naxis3=header['NCHAN'],
coordsys='galactic',
bmaj=bmaj.to(u.deg).value,
bmin=bmaj.to(u.deg).value,
pixsize=pixsize.to(u.arcsec).value,
cunit3='Hz',
ctype3='FREQ',
output_flatheader='header.txt',
output_cubeheader='cubeheader.txt',
cd3=header['FRES']*1e6,
crval3=restfreq.to(u.Hz).value,
crpix3=rchan,
clobber=True, bunit="K",
restfreq=restfreq.to(u.Hz).value,
radio=True)
add_pipeline_header_data(cubeheader)
add_pipeline_header_data(flatheader)
makecube.make_blank_images(cubefilename, flatheader=flatheader,
cubeheader=cubeheader, clobber=clobber,
dtype='float32')
def make_blanks_merge(cubefilename, lowhigh='low', clobber=True,
width=1.0*u.GHz, lowest_freq=None, pixsize=7.2*u.arcsec,
restfreq=218222.192*u.MHz):
# total size is 2.3 x 0.4 degrees
# 1150x
# center is 0.55 -0.075
naxis1 = 1150
naxis2 = 200
# beam major/minor axis are the same, gaussian for 12m telescope
# we convolved with a 10" FWHM Gaussian kernel, so we add that in quadrature
    bmaj = (1.22*restfreq.to(u.m,u.spectral())/(12*u.m))*u.radian
bmaj = (bmaj**2 + (10*u.arcsec)**2)**0.5
cd3 = ((1*u.km/u.s)/constants.c * 218.2*u.GHz).to(u.Hz).value
naxis3 = int(np.ceil(((width / (218.2*u.GHz) * constants.c) / (u.km/u.s)).decompose().value))
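    # Illustrative numbers (approximate): cd3 corresponds to 1 km/s at
    # 218.2 GHz, i.e. ~0.73 MHz per channel, and for the default width of
    # 1 GHz naxis3 comes out to ~1374 channels.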
if lowest_freq is None:
lowest_freq = 216.8e9 if lowhigh=='low' else 218e9
cubeheader, flatheader = makecube.generate_header(0.55, -0.075,
naxis1=naxis1,
naxis2=naxis2,
naxis3=naxis3,
coordsys='galactic',
bmaj=bmaj.to(u.deg).value,
bmin=bmaj.to(u.deg).value,
pixsize=pixsize.to(u.arcsec).value,
cunit3='Hz',
ctype3='FREQ',
output_flatheader='header.txt',
output_cubeheader='cubeheader.txt',
cd3=cd3,
crval3=lowest_freq,
crpix3=1, clobber=True,
bunit="K",
restfreq=restfreq.to(u.Hz).value,
radio=True)
add_pipeline_header_data(cubeheader)
add_pipeline_header_data(flatheader)
makecube.make_blank_images(cubefilename, flatheader=flatheader,
cubeheader=cubeheader, clobber=clobber,
dtype='float32')
def data_diagplot(data, dataset, ext='png', newfig=False,
max_size=1024, freq=None, scans=None,
figure=None, axis=None):
log.info("Doing diagnostics in "+dataset)
if figure:
pass
elif newfig:
figure = pl.figure()
else:
figure = pl.figure(1)
figure.clf()
if (np.isnan(data)).all():
log.exception("ALL data is NaN in {0}".format(dataset))
import ipdb; ipdb.set_trace()
if np.any([d > max_size for d in data.shape]):
# downsample to *not less than* max_size
factors = [max([1,int(np.floor(d / max_size))]) for d in data.shape]
data = image_tools.downsample(data, min(factors))
if axis is None:
axis = figure.gca()
axis = mpl_plot_templates.imdiagnostics(data, axis=axis,
second_xaxis=freq)
if freq is not None:
#axis.set_xticklabels(np.interp(axis.get_xticks(),
# np.arange(freq.size),
# freq))
axis.figure.axes[5].set_xlabel("Frequency")
else:
axis.set_xlabel("Channel #")
axis.set_ylabel("Integration #")
if scans is not None and len(scans) < 50:
xlim = axis.get_xlim()
ylim = axis.get_ylim()
axis.hlines(scans, xlim[0], xlim[1], color='k', linestyle='--',
alpha=0.5)
axis.set_xlim(*xlim)
figfilename = os.path.join(diagplotdir, dataset+"_diagnostics."+ext)
checkdir_makedir(figfilename)
try:
pl.savefig(figfilename,bbox_inches='tight')
except Exception as ex:
log.exception(ex)
print(ex)
return axis
def diagplot(data, tsys, noise, dataset, freq=None, mask=None, ext='png',
newfig=False, **kwargs):
"""
Generate a set of diagnostic plots
Parameters
----------
data : `numpy.ndarray`
A 2D data set, with scans along the y-axis and frequency along the
x-axis
tsys : `numpy.ndarray`
A 1D data set giving TSYS at each time
noise : `numpy.ndarray`
The measured noise in each scan
freq : `numpy.ndarray` or None
The frequencies to plot along the X-axis
mask : `numpy.ndarray`
        Channels where the mask is True are blanked (set to NaN) in the
        overplotted mean spectrum
ext : str
The image extension to use when saving
"""
if newfig:
pl.figure()
else:
pl.figure(2)
pl.clf()
pl.subplot(2,1,1)
pl.plot(tsys,np.arange(tsys.size),alpha=0.5)
pl.xlabel("TSYS")
pl.ylabel("Integration")
pl.subplot(2,1,2)
pl.plot(tsys, noise, '.',alpha=0.5)
pl.xlabel("TSYS")
pl.ylabel("Noise")
figfilename = os.path.join(diagplotdir, dataset+"_tsys."+ext)
checkdir_makedir(figfilename)
pl.savefig(figfilename,bbox_inches='tight')
if newfig:
pl.figure()
else:
pl.figure(3)
pl.clf()
if freq is None:
freq = np.arange(data.shape[1])
pl.plot(freq, data.mean(axis=0))
if mask is not None:
# Avoid the incorrect appearance of interpolation by masking out
# intermediate values
d_to_plot = data.mean(axis=0)
d_to_plot[mask] = np.nan
pl.plot(freq, d_to_plot)
pl.xlabel("Frequency")
pl.ylabel("Mean Counts")
figfilename = os.path.join(diagplotdir, dataset+"_masked."+ext)
checkdir_makedir(figfilename)
pl.savefig(figfilename,bbox_inches='tight')
data_diagplot(data, dataset, ext=ext, newfig=newfig, freq=freq, **kwargs)
def build_cube_generic(window, freq=True, mergefile=None, datapath='./',
outpath='./', datasets=[], scanblsub=False,
shapeselect=None,
sourcename=None,
tsysrange=[100,250],
excludefitrange=None,
downsample_factor=None,
pixsize=7.2*u.arcsec,
kernel_fwhm=10/3600.,
pca_clean=False,
timewise_pca=True,
memmap=True,
mask_level_sigma=3,
blsub=True,
contsub=False,
verbose=False, debug=False, **kwargs):
"""
TODO: comment!
kwargs are passed to process_data
Parameters
----------
window : 'low' or 'high'
Which of the two APEX SHFI windows to use
freq : bool
If True, the cube will be in frequency units and will fully cover the
observed spectral range. If False, the cube will be in velocity units
centered on the observed rest frequency. This is ignored if mergefile
is set
"""
if window not in ('low','high'):
        raise ValueError("window must be 'low' or 'high'")
if mergefile:
cubefilename=os.path.join(outpath,"{0}_{1}".format(mergefile, window))
else:
# assume that we want a cube for EACH data set
cubefilename = None
#rcr = [-1000,0] if window == 'low' else [0,5000]
#xtel = 'AP-H201-F101' if window == 'high' else 'AP-H201-F102'
xtel = 'AP-H201-X202' if window=='low' else 'AP-H201-X201'
all_data,all_hdrs,all_gal = {},{},{}
for dataset in datasets:
apex_filename = os.path.join(datapath,dataset+".apex")
spectra,headers,indices = load_apex_cube(apex_filename,
downsample_factor=downsample_factor,
xtel=xtel,
sourcename=sourcename)
data,hdrs,gal = select_apex_data(spectra, headers, indices,
sourcename=sourcename,
shapeselect=shapeselect, xtel=xtel,
rchanrange=None,
galactic_coordinate_range=None,
tsysrange=tsysrange)
log.info("Selected %i spectra from %s" % (len(hdrs), dataset))
all_data[dataset] = data
all_hdrs[dataset] = hdrs
all_gal[dataset] = gal
all_gal_vect = coordinates.SkyCoord(np.hstack([all_gal[g].l.to(u.radian).value
for g in all_gal]) * u.radian,
np.hstack([all_gal[g].b.to(u.radian).value
for g in all_gal]) * u.radian,
frame='galactic')
all_gal_vect.l.wrap_angle = 180*u.deg
log.info("Data has been collected and flagged, now adding to cube.")
headerpars = dict(kernel_fwhm=kernel_fwhm, pca_clean=pca_clean,
timewise_pca=timewise_pca,
scanblsub=scanblsub)
if 'pcakwargs' in kwargs:
headerpars.update(kwargs['pcakwargs'])
if cubefilename is not None:
add_pipeline_parameters_to_file(cubefilename, 'generic', **headerpars)
for dataset in all_data:
if not mergefile:
cubefilename = os.path.join(outpath, "{0}_{1}_cube".format(dataset,window))
log.debug("Creating blanks for {0}".format(cubefilename))
if freq:
make_blanks_freq(all_gal_vect, hdrs[0], cubefilename,
clobber=True, pixsize=pixsize)
else:
make_blanks(all_gal_vect, hdrs[0], cubefilename, clobber=True,
pixsize=pixsize)
add_pipeline_parameters_to_file(cubefilename, 'generic', **headerpars)
if 'raw' in cubefilename:
import ipdb; ipdb.set_trace()
data = all_data[dataset]
hdrs = all_hdrs[dataset]
gal = all_gal[dataset]
data, gal, hdrs = process_data(data, gal, hdrs, dataset+"_"+xtel,
scanblsub=scanblsub, verbose=verbose,
timewise_pca=timewise_pca,
pca_clean=pca_clean, **kwargs)
add_apex_data(data, hdrs, gal, cubefilename,
excludefitrange=excludefitrange,
retfreq=freq,
varweight=True,
kernel_fwhm=kernel_fwhm,
debug=debug)
if not mergefile:
if contsub:
log.info("Continuum subtraction: {0}.".format(cubefilename))
contsub_cube(cubefilename)
elif blsub:
log.info("Baseline subtraction: {0}.".format(cubefilename))
baseline_cube(cubefilename+".fits",
mask_level_sigma=mask_level_sigma)
if mergefile and contsub:
log.info("Completed cubemaking. Continuum subtraction now.")
contsub_cube(cubefilename)
elif mergefile and blsub:
log.info("Completed cubemaking. Baseline subtraction now.")
baseline_cube(cubefilename, mask_level_sigma=mask_level_sigma)
# Downsample by some factor?
if downsample_factor:
downsample_cube(cubefilename, downsample_factor)
log.info("Done with "+cubefilename)
def downsample_cube(cubefilename, downsample_factor):
log.info("Downsampling "+cubefilename)
cube = fits.open(cubefilename+".fits")
avg = FITS_tools.downsample.downsample_axis(cube[0].data, downsample_factor, 0)
cube[0].data = avg
cube[0].header['CDELT3'] *= downsample_factor
scalefactor = 1./downsample_factor
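    # CRPIX bookkeeping for block-averaging by downsample_factor (f): FITS
    # pixel centers sit at integer coordinates, so original pixel p maps to
    # (p - 0.5)/f + 0.5 in the averaged cube.  Evaluated at p = CRPIX3 this is
    # (CRPIX3 - 1)/f + 0.5 + 1/(2 f), i.e. the expression below.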
crpix3 = (cube[0].header['CRPIX3']-1)*scalefactor+0.5+scalefactor/2.
cube[0].header['CRPIX3'] = crpix3
cube.writeto(cubefilename+'_downsampled.fits', clobber=True)
def build_cube_ao(window, freq=False, mergefile=None,
mergefilename=None,
datapath=aorawpath,
outpath=aopath,
datasets=datasets_ao,
kernel_fwhm=10/3600.,
scanblsub=False,
verbose=False,
debug=False,
pca_clean=True,
timewise_pca=True,
extra_suffix="",
**kwargs):
"""
TODO: comment!
kwargs are passed to process_data
"""
if window not in ('low','high'):
        raise ValueError("window must be 'low' or 'high'")
if mergefile:
if mergefilename is not None:
cubefilename = mergefilename
else:
cubefilename=os.path.join(outpath,'APEX_H2CO_merge_%s' % window)
elif freq:
cubefilename=os.path.join(outpath,'APEX_H2CO_Ao_Freq_%s' % window)
else:
cubefilename=os.path.join(outpath,'APEX_H2CO_Ao_%s' % window)
if extra_suffix:
cubefilename = cubefilename + extra_suffix
#rcr = [-1000,0] if window == 'low' else [0,5000]
xtel = 'AP-H201-F101' if window == 'high' else 'AP-H201-F102'
all_data,all_hdrs,all_gal = {},{},{}
for dataset in datasets:
apex_filename = os.path.join(datapath,dataset+".apex")
spectra,headers,indices = load_apex_cube(apex_filename,
#downsample_factor=downsample_factor,
xtel=xtel,
sourcename='SGRA')
data,hdrs,gal = select_apex_data(spectra, headers, indices,
sourcename='SGRA', shapeselect=4096,
xtel=xtel,
rchanrange=None,
#rchanrange=rcr,
tsysrange=[100,250])
log.info("Selected %i spectra from %s" % (len(hdrs), dataset))
#This flagging is more appropriately done in the process_data step
# # noise_cut = 4 determined by looking at a plot of noise vs time; 0.7%
# # of data is above 4
# # Extreme noise appears independent of TSYS!
# # 4% of data >0.75, but it's pretty bad
# noise = np.std(data,axis=1)
# freq_step = np.array([h['FRES'] for h in hdrs])
# exptime = np.array([h['EXPOSURE'] for h in hdrs])
# tsys = np.array([h['TSYS'] for h in hdrs])
# theoretical_rms = 2.0**0.5*tsys/(np.abs(freq_step*1.0e6)*exptime)**0.5
# bad = noise > theoretical_rms
# data = data[True-bad]
# gal = gal[True-bad]
# hdrs = [h for h,b in zip(hdrs,bad) if not b]
# print "Flagged out %i bad values (%0.1f%%)." % (bad.sum(),bad.sum()/float(bad.size))
all_data[dataset] = data
all_hdrs[dataset] = hdrs
all_gal[dataset] = gal
all_gal_vect = coordinates.SkyCoord(np.hstack([all_gal[g].l.to(u.radian).value
for g in all_gal]) * u.radian,
np.hstack([all_gal[g].b.to(u.radian).value
for g in all_gal]) * u.radian,
frame='galactic')
all_gal_vect.l.wrap_angle = 180*u.deg
if not mergefile:
if freq:
make_blanks_freq(all_gal_vect, hdrs[0], cubefilename, clobber=True)
else:
make_blanks(all_gal_vect, hdrs[0], cubefilename, clobber=True)
if freq:
excludefitrange=None
else:
excludefitrange = [700,1300] # FIX THIS when velos are fixed
headerpars = dict(kernel_fwhm=kernel_fwhm, pca_clean=pca_clean,
timewise_pca=timewise_pca,
scanblsub=scanblsub)
if 'pcakwargs' in kwargs:
headerpars.update(kwargs['pcakwargs'])
add_pipeline_parameters_to_file(cubefilename, 'ao', **headerpars)
log.info("Data has been collected and flagged, now adding to cube.")
for dataset in all_data:
data = all_data[dataset]
hdrs = all_hdrs[dataset]
gal = all_gal[dataset]
data, gal, hdrs = process_data(data, gal, hdrs, dataset+"_"+xtel,
scanblsub=scanblsub, verbose=verbose,
pca_clean=pca_clean,
timewise_pca=timewise_pca,
**kwargs)
add_apex_data(data, hdrs, gal, cubefilename,
excludefitrange=excludefitrange,
kernel_fwhm=kernel_fwhm,
retfreq=freq,
varweight=True,
debug=debug)
cube = fits.open(cubefilename+'.fits', memmap=False)
cont = fits.getdata(cubefilename+'_continuum.fits')
data = cube[0].data
cube[0].data = data - cont
cube.writeto(cubefilename+'_sub.fits', clobber=True)
if not mergefile:
# Downsample by averaging over a factor of 8
downsample_factor = 4 if freq else 8
avg = np.mean([cube[0].data[ii::downsample_factor,:,:] for ii in
xrange(downsample_factor)], axis=0)
cube[0].data = avg
cube[0].header['CDELT3'] *= float(downsample_factor)
scalefactor = 1./downsample_factor
crpix3 = (cube[0].header['CRPIX3']-1)*scalefactor+0.5+scalefactor/2.
cube[0].header['CRPIX3'] = crpix3
# from FITS_tools/hcongrid h['CRPIX2'] = (h['CRPIX2']-1)*scalefactor + scalefactor/2. + 0.5
cube.writeto(cubefilename+'_downsampled.fits', clobber=True)
def build_cube_2013(mergefile=None,
lowhigh='low',
downsample_factor=8,
datapath=june2013datapath,
outpath=june2013path,
datasets=datasets_2013,
kernel_fwhm=10/3600.,
scanblsub=False,
timewise_pca=False, # 2013 data can't handle cleaning.
pca_clean=False, # 2013 data can't handle cleaning. =(
extra_suffix="",
noisefactor=1.0,
verbose=True, **kwargs):
"""
Notes:
noisefactor set to 1.0 on Nov 15 because there is no data below that value
and only 18 above that value in one data set. The noise is lower than the
theoretical value for some reason.
"""
if mergefile:
cubefilename=os.path.join(outpath,mergefile)
else:
cubefilename=os.path.join(outpath,
'APEX_H2CO_2013_%s' % lowhigh)
if extra_suffix:
cubefilename = cubefilename + extra_suffix
xtel = 'AP-H201-X202' if lowhigh=='low' else 'AP-H201-X201'
# Flag out the central pixel: it is terrible.
flag_array = np.zeros(32768, dtype='bool')
flag_array[16384] = True
flag_array[-1] = True
if not mergefile:
# Need two loops. First one is just to determine map extent.
all_gal = {}
for dataset in datasets:
apex_filename=datapath+dataset+".apex"
spectra,headers,indices = load_apex_cube(apex_filename,
downsample_factor=downsample_factor,
xtel=xtel,
flag_array=flag_array,
sourcename='SGRA')
data, hdrs, gal = select_apex_data(spectra, headers, indices,
sourcename='SGRA',
shapeselect=32768/downsample_factor,
tsysrange=[100,325],
xtel=xtel,
rchanrange=None,
skip_data=True)
all_gal[dataset] = gal
all_gal_vect = coordinates.SkyCoord(np.hstack([all_gal[g].l.to(u.radian).value
for g in all_gal]) * u.radian,
np.hstack([all_gal[g].b.to(u.radian).value
for g in all_gal]) * u.radian,
frame='galactic')
all_gal_vect.l.wrap_angle = 180*u.deg
make_blanks_freq(all_gal_vect, hdrs[0], cubefilename, clobber=True)
headerpars = dict(kernel_fwhm=kernel_fwhm, pca_clean=pca_clean,
timewise_pca=timewise_pca,
scanblsub=scanblsub)
if 'pcakwargs' in kwargs:
headerpars.update(kwargs['pcakwargs'])
add_pipeline_parameters_to_file(cubefilename, '2013', **headerpars)
# need two loops to avoid loading too much stuff into memory
for dataset in datasets:
log.info("Adding data set {0} to cube file {1}".format(dataset, cubefilename))
apex_filename=datapath+dataset+".apex"
spectra,headers,indices = load_apex_cube(apex_filename,
downsample_factor=downsample_factor,
xtel=xtel,
flag_array=flag_array,
sourcename='SGRA')
if dataset == 'M-091.F-0019-2013-2013-06-13':
tsysrange=[100,260]
else:
tsysrange=[100,325]
data, hdrs, gal = select_apex_data(spectra, headers, indices,
sourcename='SGRA',
# NOT ignored, even though it's not used above...
# this is only OK because the bad shapes are from
# Saturn
shapeselect=32768/downsample_factor,
tsysrange=tsysrange,
xtel=xtel,
rchanrange=None,
skip_data=False)
data, gal, hdrs = process_data(data, gal, hdrs, dataset+"_"+xtel,
scanblsub=scanblsub, verbose=verbose,
timewise_pca=timewise_pca,
pca_clean=pca_clean,
**kwargs)
add_apex_data(data, hdrs, gal, cubefilename, retfreq=True,
kernel_fwhm=kernel_fwhm,
varweight=True,)
# FORCE cleanup
del data,hdrs,gal
cube = fits.open(cubefilename+'.fits', memmap=False)
cont = fits.getdata(cubefilename+'_continuum.fits')
data = cube[0].data
cube[0].data = data - cont
cube.writeto(cubefilename+'_sub.fits', clobber=True)
# Downsample by averaging over a factor of 8
# (this is extra downsampling)
avg = np.mean([cube[0].data[ii::2,:,:] for ii in xrange(2)], axis=0)
cube[0].data = avg
cube[0].header['CDELT3'] *= 2
scalefactor = 1./2.
crpix3 = (cube[0].header['CRPIX3']-1)*scalefactor+0.5+scalefactor/2.
cube[0].header['CRPIX3'] = crpix3
cube.writeto(cubefilename+'_downsampled.fits', clobber=True)
def build_cube_2014(sourcename,
mergefile=None,
lowhigh='low',
downsample_factor=8,
datapath=april2014path,
kernel_fwhm=10/3600.,
outpath=april2014path,
datasets=None,
scanblsub=False,
verbose=True,
pca_clean=False,
timewise_pca=False,
extra_suffix='',
tsysrange=[100,325],
posang=None,
calibration_factors=calibration_factors,
**kwargs
):
"""
Wrapper. Because each field has its own name in 2014, this will need to be
modified for the mergefile to accept wildcards or something for sourcename
selection
"""
if mergefile:
cubefilename=os.path.join(outpath,mergefile)
elif isinstance(sourcename, str):
cubefilename=os.path.join(outpath,
'APEX_H2CO_2014_%s_%s' % (sourcename, lowhigh))
else:
raise ValueError("Use a mergefile")
if extra_suffix:
cubefilename = cubefilename + extra_suffix
log.info("Building cubes for "+cubefilename)
assert lowhigh in ('low','high')
xtel = 'AP-H201-X202' if lowhigh=='low' else 'AP-H201-X201'
t0 = time.time()
# Flag out the central pixel: it is terrible.
flag_array = np.zeros(32768, dtype='bool')
flag_array[16384] = True
flag_array[-1] = True
if not mergefile:
# Need two loops. First one is just to determine map extent.
all_gal = {}
for dataset in datasets:
apex_filename=datapath+dataset+".apex"
log.info("".join(("Pre-Loading data for dataset ", dataset,
" to filename ", apex_filename, " t=",
str(time.time()-t0))))
found_data = load_apex_cube(apex_filename,
downsample_factor=downsample_factor,
xtel=xtel, sourcename=sourcename,
flag_array=flag_array,
posang=posang)
if found_data is None:
log.info("Skipping dataset {0} because it doesn't contain "
"{1} or {2}".format(dataset, sourcename, xtel))
continue
else:
spectra,headers,indices = found_data
data, hdrs, gal = select_apex_data(spectra, headers, indices,
sourcename=sourcename,
shapeselect=32768/downsample_factor,
tsysrange=[100,325],
xtel=xtel,
rchanrange=None,
skip_data=True)
all_gal[dataset] = gal
all_gal_vect = coordinates.SkyCoord(np.hstack([all_gal[g].l.to(u.radian).value
for g in all_gal]) * u.radian,
np.hstack([all_gal[g].b.to(u.radian).value
for g in all_gal]) * u.radian,
frame='galactic')
all_gal_vect.l.wrap_angle = 180*u.deg
log.info("Making blanks for "+cubefilename)
make_blanks_freq(all_gal_vect, hdrs[0], cubefilename, clobber=True)
headerpars = dict(kernel_fwhm=kernel_fwhm, pca_clean=pca_clean,
timewise_pca=timewise_pca,
scanblsub=scanblsub)
if 'pcakwargs' in kwargs:
headerpars.update(kwargs['pcakwargs'])
add_pipeline_parameters_to_file(cubefilename, '2014', **headerpars)
# need two loops to avoid loading too much stuff into memory
# (actually, we don't any more with memmaping)
for dataset in datasets:
apex_filename=datapath+dataset+".apex"
log.info("".join(("Loading data for dataset ",dataset," in filename ",
apex_filename," t=",str(time.time()-t0))))
found_data = load_apex_cube(apex_filename,
downsample_factor=downsample_factor,
xtel=xtel, sourcename=sourcename,
flag_array=flag_array,
posang=posang)
if found_data is None:
log.info("Skipping dataset {0} because it doesn't contain "
"{1} or {2}".format(dataset, sourcename, xtel))
continue
else:
spectra,headers,indices = found_data
log.info("".join(("Selecting data for dataset ", dataset,
" in filename ", apex_filename, " t=",
str(time.time()-t0))))
data, hdrs, gal = select_apex_data(spectra, headers, indices,
sourcename=sourcename,
# NOT ignored, even though it's not used above...
# this is only OK because the bad shapes are from
# Saturn
#shapeselect=4096,
shapeselect=32768/downsample_factor,
tsysrange=tsysrange,
xtel=xtel,
rchanrange=None,
skip_data=False)
log.info("".join(("Processing data for dataset ", dataset,
" in filename ", apex_filename, " t=",
str(time.time()-t0))))
data, gal, hdrs = process_data(data, gal, hdrs, os.path.join(outpath,
dataset)+"_"+xtel,
scanblsub=scanblsub, verbose=verbose,
timewise_pca=timewise_pca,
pca_clean=pca_clean,
**kwargs)
log.info("".join(("Adding data for dataset ", dataset,
" to filename ", cubefilename, " t=",
str(time.time()-t0))))
# TODO: parse date and determine whether a calibration factor needs to
# be applied
obsdates = [Time(h['DOBS'], format='jyear') for h in hdrs]
start,end = min(obsdates),max(obsdates)
calkey = cal_date_overlap([start,end])
calfactor = calibration_factors[calkey]
add_apex_data(data*calfactor, hdrs, gal, cubefilename, retfreq=True,
kernel_fwhm=kernel_fwhm, varweight=True,
# downsample factor for freqarr
)
# FORCE cleanup
log.info("".join(("Clearing data for dataset ", dataset,
" to filename ", cubefilename, " t=",
str(time.time()-t0))))
del data,hdrs,gal
log.info("".join(("Continuum subtracting ",cubefilename)))
cube = fits.open(cubefilename+'.fits', memmap=False)
cont = fits.getdata(cubefilename+'_continuum.fits')
data = cube[0].data
cube[0].data = data - cont
cube.writeto(cubefilename+'_sub.fits', clobber=True)
log.info("Downsampling "+cubefilename)
# Downsample by averaging over a factor of 8
avg = FITS_tools.downsample.downsample_axis(cube[0].data, 2, 0)
cube[0].data = avg
cube[0].header['CDELT3'] *= 2
scalefactor = 1./2.
crpix3 = (cube[0].header['CRPIX3']-1)*scalefactor+0.5+scalefactor/2.
cube[0].header['CRPIX3'] = crpix3
cube.writeto(cubefilename+'_downsampled.fits', clobber=True)
log.info("Done with "+cubefilename)
def make_high_mergecube(pca_clean={'2014':False,
'2013':False,
'ao':False},
scanblsub={'2014':False, '2013':False, 'ao':False},
timewise_pca={'2014': True, '2013':False, 'ao':True},
mergefile2=None):
if mergefile2 is None:
raise ValueError("Must specify a merge filename")
#if pca_clean:
# if timewise_pca:
# mergefile2 = 'APEX_H2CO_merge_high_timepca'
# else:
# mergefile2 = 'APEX_H2CO_merge_high'
#else:
# mergefile2 = 'APEX_H2CO_merge_high_nopca'
for suff in ("_2014_bscans", "_2014_lscans", "_2013","_ao"):
make_blanks_merge(os.path.join(mergepath,mergefile2+suff),
lowhigh='high', lowest_freq=218e9, width=1.0*u.GHz)
mapnames = ['MAP_{0:03d}'.format(ii) for ii in range(1,130)]
log.info("Building cubes: "+str(mapnames))
# Frequency: (216.9, 219.4)
build_cube_2014(mapnames,
mergefile=mergefile2+"_2014_bscans",
posang=[140,160],
outpath=mergepath,
datapath=april2014path,
lowhigh='low',
pca_clean=pca_clean['2014'],
timewise_pca=timewise_pca['2014'],
scanblsub=scanblsub['2014'],
datasets=datasets_2014)
build_cube_2014(mapnames,
mergefile=mergefile2+"_2014_lscans",
posang=[50,70],
outpath=mergepath,
datapath=april2014path,
lowhigh='low',
pca_clean=pca_clean['2014'],
timewise_pca=timewise_pca['2014'],
scanblsub=scanblsub['2014'],
datasets=datasets_2014)
log.info("Building Ao cubes")
# ('ao', 'high'): (218.0, 219.0),
build_cube_ao(window='high', mergefile=True, freq=True, outpath=mergepath,
pca_clean=pca_clean['ao'], timewise_pca=timewise_pca['ao'],
mergefilename=os.path.join(mergepath, mergefile2+"_ao"),
scanblsub=scanblsub['ao'],
datapath=aorawpath)
log.info("Building 2013 cubes")
# (2013, 'high'): (217.5, 220.0)
build_cube_2013(mergefile=mergefile2+"_2013",
outpath=mergepath,
datapath=june2013datapath,
lowhigh='high',
timewise_pca=timewise_pca['2013'],
pca_clean=pca_clean['2013'],
scanblsub=scanblsub['2013'])
do_plait_h2comerge(mergepath=mergepath, mergefile2=mergefile2)
def do_plait_h2comerge(mergepath=mergepath, mergefile2=None):
"""
doplait, not yoplait
(create the merged, plaited cube)
default is
    do_plait_h2comerge(mergefile2='APEX_H2CO_merge_high')
"""
from sdpy import plait
# plaiting doesn't work well for unequal weights or large swathes
# of missing data
all_targets = ("_2014_bscans", "_2014_lscans", "_2013","_ao")
plait_targets = all_targets[:2]
def fnify(suff, end='.fits'):
return os.path.join(mergepath, mergefile2+suff+end)
headers = [fits.getheader(fnify(suff))
for suff in plait_targets]
header = headers[0]
for h in headers:
for k in h:
header[k] = h[k]
cubes = [fits.getdata(fnify(suff))
for suff in plait_targets]
angles = [0, 90]#, 58.6, 58.6]
cube_comb = plait.plait_cube(cubes, angles=angles, scale=3)
hdu = fits.PrimaryHDU(data=cube_comb, header=header)
hdu.writeto(fnify("_plait"), clobber=True)
comb_weights = np.sum([fits.getdata(fnify(suff, '_nhits.fits'))
for suff in plait_targets], axis=0)
    whdu = fits.PrimaryHDU(data=comb_weights,
                           header=fits.getheader(fnify(plait_targets[-1],
                                                       '_nhits.fits')))
whdu.writeto(fnify('_nhits'), clobber=True)
# Add back the 2013 and Ao data without plaiting (since that doesn't work)
data = [cube_comb] + [np.nan_to_num(fits.getdata(fnify(suff)))
for suff in all_targets[2:]]
weights = ([comb_weights] +
[fits.getdata(fnify(suff, '_nhits.fits'))
for suff in all_targets[2:]])
sweights = np.sum(weights, axis=0)
total_stack = (np.sum([(d*w) for d,w in zip(data,weights)], axis=0) /
sweights)
total_stack[:,sweights<0.5] = np.nan
for h in [fits.getheader(fnify(suff)) for suff in all_targets[2:]]:
for k in h:
header[k] = h[k]
hdu = fits.PrimaryHDU(data=total_stack, header=header)
hdu.writeto(fnify('_plait_all'), clobber=True)
whdu = fits.PrimaryHDU(data=sweights, header=header)
whdu.writeto(fnify('_plait_all_nhits'), clobber=True)
# Smooth and downsample finally...
cube = spectral_cube.SpectralCube.read(fnify('_plait_all'))
outheader = cube.header.copy()
outheader['CRPIX3'] = 1
outheader['CRVAL3'] = 218e9
outheader['CUNIT3'] = 'Hz'
outheader['CDELT3'] = 1453333. # about 2km/s
    outheader['NAXIS3'] = int(1e9 / outheader['CDELT3']) # 688 pixels
# kw = 2 pix
cubesm = cube_regrid.spatial_smooth_cube(cube.filled_data[:], 2,
use_fft=False,
numcores=4)
cubesm = cube_regrid.spectral_smooth_cube(cubesm, 2,
use_fft=False,
numcores=4)
cubesm[cubesm==0] = np.nan
hdu = fits.PrimaryHDU(data=cubesm, header=cube.header)
newhdu = cube_regrid.regrid_cube_hdu(hdu, outheader, order=2,
prefilter=False)
newhdu.writeto(fnify('_plait_all_smooth'), output_verify='fix', clobber=True)
baseline_cube(fnify('_plait_all'), polyspline='spline', mask_level_sigma=5,
order=3)
# Can't get this to work - apparently there are some entirely flagged-out
# data sets
baseline_cube(fnify('_plait_all_smooth'), polyspline='spline',
mask_level_sigma=5, order=3, splinesampling=50)
def make_low_mergecube(pca_clean={'2014':False,
'2013':False,
'ao':False},
scanblsub={'2014':False, '2013':False, 'ao':False},
timewise_pca={'2014': True, '2013':False, 'ao':True},
mergefile1 = 'APEX_H2CO_merge_low',):
make_blanks_merge(os.path.join(mergepath,mergefile1), lowhigh='low')
for suff in ("_2014_bscans", "_2014_lscans", "_2013","_ao"):
make_blanks_merge(os.path.join(mergepath,mergefile1+suff),
lowhigh='high', lowest_freq=216.9e9, width=2.0*u.GHz)
mapnames = ['MAP_{0:03d}'.format(ii) for ii in range(1,130)]
log.info("Building cubes: "+str(mapnames)+" low bscans")
build_cube_2014(mapnames,
mergefile=mergefile1+"_2014_bscans",
posang=[140,160],
outpath=mergepath,
datapath=april2014path,
lowhigh='low',
pca_clean=pca_clean['2014'],
timewise_pca=timewise_pca['2014'],
scanblsub=scanblsub['2014'],
datasets=datasets_2014)
log.info("Building cubes: "+str(mapnames)+" low lscans")
build_cube_2014(mapnames,
mergefile=mergefile1+"_2014_lscans",
posang=[50,70],
outpath=mergepath,
datapath=april2014path,
lowhigh='low',
pca_clean=pca_clean['2014'],
timewise_pca=timewise_pca['2014'],
scanblsub=scanblsub['2014'],
datasets=datasets_2014)
log.info("Building Ao cubes")
# ('ao', 'high'): (218.0, 219.0),
build_cube_ao(window='low', mergefile=True, freq=True, outpath=mergepath,
pca_clean=pca_clean['ao'], timewise_pca=timewise_pca['ao'],
mergefilename=os.path.join(mergepath, mergefile1+"_ao"),
scanblsub=scanblsub['ao'],
datapath=aorawpath)
log.info("Building 2013 cubes")
# (2013, 'high'): (217.5, 220.0)
build_cube_2013(mergefile=mergefile1+"_2013",
outpath=mergepath,
datapath=june2013datapath,
lowhigh='low',
timewise_pca=timewise_pca['2013'],
pca_clean=pca_clean['2013'],
scanblsub=scanblsub['2013'])
print("TODO: plait the low-frequency merge.")
print("TODO: possible merge the ao low/high into the low-merge?")
def integrate_slices_high(prefix='merged_datasets/APEX_H2CO_merge_high_sub'):
ffile = fits.open(prefix+'.fits')
cd3 = (ffile[0].header['CD3_3'] if 'CD3_3' in ffile[0].header else
ffile[0].header['CDELT3']) / 1e3 # convert to km/s (I hope)
integ1,hdr = cubes.integ(ffile, [235,344], average=np.nansum) # first H2CO line: blue
hdu1 = fits.PrimaryHDU(data=integ1/cd3, header=hdr)
hdu1.writeto(prefix+"_H2CO_303-202_blue.fits", clobber=True)
integ2,hdr = cubes.integ(ffile, [161,235], average=np.nansum) # first H2CO line: red
hdu2 = fits.PrimaryHDU(data=integ2/cd3, header=hdr)
hdu2.writeto(prefix+"_H2CO_303-202_red.fits", clobber=True)
integ4,hdr = cubes.integ(ffile, [161,344], average=np.nansum) # first H2CO line: red
hdu4 = fits.PrimaryHDU(data=integ4/cd3, header=hdr)
hdu4.writeto(prefix+"_H2CO_303-202.fits", clobber=True)
integ3,hdr = cubes.integ(ffile, [513,615], average=np.nansum) # second H2CO line: blue
hdu3 = fits.PrimaryHDU(data=integ3/cd3, header=hdr)
hdu3.writeto(prefix+"_H2CO_322-221_blue.fits", clobber=True)
def integrate_slices_low(prefix='merged_datasets/APEX_H2CO_merge_low_sub'):
ffile = fits.open(prefix+'.fits')
integ1,hdr = cubes.integ(ffile, [335,446], average=np.nansum)
hdu1 = fits.PrimaryHDU(data=integ1, header=hdr)
hdu1.writeto(prefix+"_SiO5-4.fits", clobber=True)
def integrate_mask(prefix, mask=h2copath+'APEX_H2CO_303_202_mask.fits',
maskpre=''):
"""
Integrate a cube with name specified by 'prefix' using a specific mask
"""
if isinstance(mask,str):
mask = fits.getdata(mask).astype('bool')
ffile = fits.open(prefix+'.fits')
cd = ffile[0].header['CDELT3']
ffile[0].data *= mask * cd
ffile[0].data[~mask.astype('bool')] = np.nan
integ1,hdr = cubes.integ(ffile, [0,ffile[0].shape[0]], average=np.nansum)
hdr['BUNIT'] = ('K km/s',"Integrated over masked region")
hdu1 = fits.PrimaryHDU(data=integ1, header=hdr)
hdu1.writeto("{0}_{1}mask_integ.fits".format(prefix, maskpre),
clobber=True)
def integrate_h2co_by_freq(filename):
import spectral_cube
cube = spectral_cube.SpectralCube.read(filename)
#if 'high' in filename:
# cocube = cube
#else:
# cocube = spectral_cube.SpectralCube.read(filename.replace('low','high'))
#mcube = cocube.with_spectral_unit(u.km/u.s,
# rest_value=bright_lines['13CO']*u.GHz,
# velocity_convention='radio')
#coscube = mcube.spectral_slab(-100*u.km/u.s, 150*u.km/u.s)
#mask = coscube > 1
for line in bright_lines:
scube = cube.with_spectral_unit(u.km/u.s,
rest_value=bright_lines[line]*u.GHz,
velocity_convention='radio')
subcube1 = scube.spectral_slab(-100*u.km/u.s, 150*u.km/u.s)
ncube = scube.spectral_slab(-150*u.km/u.s, -100*u.km/u.s)
noise = ncube.apply_numpy_function(np.std, axis=0)
#mask._wcs = subcube1.wcs
subcube = subcube1.with_mask(subcube1>noise)#.with_mask(mask)
if subcube.shape[0] == 1:
# implies out of range
continue
mom0 = subcube.moment0()
mom1 = subcube.moment1()
mom2 = subcube.moment2()
fn = os.path.split(filename)[1]
outfn = 'projections/'+fn.replace(".fits","_{line}_{mom}.fits")
mom0.hdu.writeto(outfn.format(line=line, mom='mom0'),clobber=True)
mom1.hdu.writeto(outfn.format(line=line, mom='mom1'),clobber=True)
mom2.hdu.writeto(outfn.format(line=line, mom='mom2'),clobber=True)
def compute_noise_high(prefix=mergepath+'APEX_H2CO_merge_high_sub',
pixrange=[700,900]):
ffile = fits.open(prefix+'.fits')
    # mad_std is a robust (median-absolute-deviation based) estimator of the
    # standard deviation, so isolated bright channels within pixrange do not
    # inflate the noise estimate
    integ1,hdr = cubes.integ(ffile, pixrange, average=mad_std)
integ1.fill_value = np.nan
hdu1 = fits.PrimaryHDU(data=integ1.filled(), header=hdr)
hdu1.writeto(prefix+"_noise.fits", clobber=True)
def compute_noise_low(prefix=mergepath+'APEX_H2CO_merge_low_sub',pixrange=[512,675]):
ffile = fits.open(prefix+'.fits')
integ1,hdr = cubes.integ(ffile, pixrange, average=np.nanstd)
hdu1 = fits.PrimaryHDU(data=integ1, header=hdr)
hdu1.writeto(prefix+"_noise.fits", clobber=True)
def compute_noise_extras(prefix=june2013path+'APEX_H2CO_2013_%s_sub',
lowhigh='high',
pixrange=[0,4096]):
ffile = fits.open((prefix % lowhigh)+'.fits')
integ1,hdr = cubes.integ(ffile, pixrange, average=np.nanstd)
hdu1 = fits.PrimaryHDU(data=integ1, header=hdr)
hdu1.writeto(prefix+"_noise.fits", clobber=True)
def signal_to_noise_mask_cube(prefix=None, cube=None, noise=None,
kernelsize=[2,2,2], grow=1, sigmacut=3,
mask_hc3n=False):
"""
Generate a signal-to-noise mask and use it to select the detected pixels in
a cube.
The algorithm finds all pixels in a smoothed version of the cube with
values >``sigmacut``*noise. It then grows that mask by ``grow`` pixels in
each direction.
Parameters
----------
prefix : str
The prefix for the FITS input and output files
cube : np.ndarray
Alternative to prefix: can pass in a cube directly
noise : np.ndarray
an array that is broadcastable to the cube shape
kernelsize : (int,int,int)
A length-3 list or tuple specifying the size of the kernel to smooth
the cube with.
grow : int
The number of pixels to grow the mask in each direction
sigmacut : float
The significance level of the pixels to include
"""
if prefix is not None:
ffile = fits.open(prefix+'.fits')
cube = ffile[0].data
if noise is None:
noise = fits.getdata(prefix+'_noise.fits')
log.info("Initiating cube smooth of {0}.".format(prefix))
    elif cube is None or noise is None:
raise ValueError("Must specify cube and noise if you do not "
"specify a prefix")
t0 = time.time()
smcube = cube_regrid.gsmooth_cube(cube, kernelsize, use_fft=False,
kernelsize_mult=3)
log.info("Completed cube smooth in %i seconds" % (time.time()-t0))
mask = smcube > noise*sigmacut
mask_grow = scipy.ndimage.morphology.binary_dilation(mask, iterations=grow)
cube[~mask_grow] = np.nan
if prefix is None:
return cube, mask_grow
ffile[0].data = cube
ffile[0].writeto(prefix+"_snmasked.fits", clobber=True)
ffile[0].data = mask_grow.astype('int')
if mask_hc3n:
maskhdu = mask_out_hc3n(ffile[0])
maskhdu.writeto(prefix+"_mask.fits", clobber=True)
else:
ffile[0].writeto(prefix+"_mask.fits", clobber=True)
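# A minimal usage sketch for signal_to_noise_mask_cube (the file name below is
# a hypothetical placeholder, not a product of this pipeline):
#
#     signal_to_noise_mask_cube(prefix='APEX_SOMECUBE', sigmacut=2, grow=2)
#
# would read APEX_SOMECUBE.fits and APEX_SOMECUBE_noise.fits and write
# APEX_SOMECUBE_snmasked.fits plus APEX_SOMECUBE_mask.fits.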
def do_sncube_masking_hi(prefix=h2copath+'APEX_H2CO_303_202'):
# 0-25 not checked! arbitrary choice.
compute_noise_high(prefix, pixrange=[0,25])
signal_to_noise_mask_cube(prefix)
integrate_slices_high(prefix+'_snmasked')
def extract_subcube(cubefilename, outfilename, linefreq=218.22219*u.GHz,
debug=False, smooth=False, vsmooth=False, naxis3=300,
vmin=-155*u.km/u.s, vmax=155*u.km/u.s):
# Picked a tighter range to avoid other lines contaminating H2CO
#vmin=-225*u.km/u.s, vmax=275*u.km/u.s):
t0 = time.time()
log.info(("Extracting subcube at {0} from {1}"
" with smooth={2} and vsmooth={3}").format(linefreq,
cubefilename, smooth,
vsmooth))
cube = spectral_cube.SpectralCube.read(cubefilename)
vcube = cube.with_spectral_unit(u.km/u.s, rest_value=linefreq,
velocity_convention='radio')
svcube = vcube.spectral_slab(vmin, vmax)
crval3 = vmin.to(u.km/u.s).value
outheader = svcube.header
outheader['CRPIX3'] = 1
outheader['CRVAL3'] = crval3
outheader['CUNIT3'] = 'km/s'
outheader['CDELT3'] = 1.0
outheader['NAXIS3'] = naxis3
outheader['NAXIS2'] = svcube.shape[1]
outheader['NAXIS1'] = svcube.shape[2]
if smooth:
#cubesm = gsmooth_cube(ffile[0].data, [3,2,2], use_fft=True,
# psf_pad=False, fft_pad=False)
# smoothed with 2 pixels -> sigma=10", fwhm=23"
# this is an "optimal smooth", boosting s/n and smoothing to 36"
# resolution.
kw = 2 if not vsmooth else 4
cubesm = cube_regrid.spatial_smooth_cube(svcube.filled_data[:], kw,
use_fft=False,
numcores=4)
cubesm = cube_regrid.spectral_smooth_cube(cubesm, 3/2.35,
use_fft=False,
numcores=4)
svcube._data = cubesm
outheader['CDELT3'] = outheader['CDELT3'] * kw
outheader['NAXIS3'] = outheader['NAXIS3'] / kw
crpix3 = (outheader['CRPIX3']-1)*(1./kw)+0.5+(1./kw)/2.
outheader['CRPIX3'] = crpix3
# Now that we've written this out, we use interpolation to force the cube
# onto a grid that starts at *exactly* vmin
newhdu = cube_regrid.regrid_cube_hdu(svcube.hdu, outheader, order=1, prefilter=False)
newhdu.writeto(outfilename, output_verify='fix', clobber=True)
log.info("Completed cube extraction to {1} in {0} seconds.".format(time.time()-t0,
outfilename))
return newhdu
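# A hedged usage sketch for extract_subcube (paths are placeholders): to pull
# out a +/-155 km/s cube around the H2CO 3(0,3)-2(0,2) line from a merged
# frequency cube, something like
#
#     extract_subcube('merged_cube.fits', 'APEX_H2CO_303_202.fits',
#                     linefreq=218.22219*u.GHz, smooth=False)
#
# loosely mirrors the calls made in do_extract_subcubes below.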
def make_smooth_noise(noisefilename, outfilename, kernelwidth=2, clobber=True):
data = fits.getdata(noisefilename)
kernel = Gaussian2DKernel(stddev=kernelwidth)
kernel.normalize('integral')
smdata = convolve(data, kernel)
kernel.normalize('peak')
npix = kernel.array.sum()
# Average down the noise by sqrt(npix)
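    # e.g. for kernelwidth=2 the peak-normalized kernel sums to roughly
    # 2*pi*sigma**2 ~ 25 effective pixels, so the smoothed noise map is
    # divided by ~5 (an illustrative estimate; the exact value depends on the
    # kernel truncation)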
hdu = fits.PrimaryHDU(data=(smdata/npix**0.5).astype(data.dtype),
header=fits.getheader(noisefilename))
hdu.writeto(outfilename, clobber=clobber)
def make_line_mask(freqarr, lines=bright_lines):
mask = np.ones(freqarr.size, dtype='bool')
for ln,lf in lines.items():
bw = bandwidths[ln]
wh = (lf*1e3-bw < freqarr) & (lf*1e3+bw > freqarr)
mask[wh] = False
return mask
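# Sketch of make_line_mask in use (assuming `bright_lines` is in GHz and
# `bandwidths` in MHz, as the factor of 1e3 above implies): given a frequency
# axis in MHz from hdr_to_freq, channels within one bandwidth of a bright line
# come back False and line-free channels come back True, e.g.
#
#     freq = hdr_to_freq(hdrs[0])
#     linefree = make_line_mask(freq)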
def do_extract_subcubes(outdir=molpath, merge_prefix='APEX_H2CO_merge',
cubefilename=None,
frange=None, lines=all_lines,
suffix="_sub",
vsmooth=False,
integrate=False):
"""
Parameters
----------
integrate : bool
Integrate the extracted cube using a mask. WARNING: doesn't check
if the mask exists!
Examples
--------
>>> do_extract_subcubes(outdir='/Volumes/passport/apex/merged_datasets/molecule_cubes',
... suffix='', merge_prefix='APEX_H2CO_2014_merge')
>>> do_extract_subcubes(lines=lines, merge_prefix='APEX_H2CO_merge',
... suffix='_plait_all')
"""
if cubefilename is None:
cubefilenames = [os.path.join(mergepath,
merge_prefix+'_low{0}.fits'.format(suffix)),
os.path.join(mergepath,
merge_prefix+'_high{0}.fits'.format(suffix))]
else:
cubefilenames = [cubefilename]
# For each cube, (maybe) load it, check it, then move on
# (the previous method would only match things in the first cube selected...)
for cubefilename in cubefilenames:
if not os.path.exists(cubefilename):
log.info("File {0} does not exist. Skipping.".format(cubefilename))
continue
for line,freq in lines.items():
if frange is not None:
if freq<frange[0] or freq>frange[1]:
log.info("Skipping line {0}".format(line))
continue
log.info("Extracting {0} from {1}".format(line,cubefilename))
header = fits.getheader(cubefilename)
ww = wcs.WCS(header)
wspec = ww.sub([wcs.WCSSUB_SPECTRAL])
nax = header['NAXIS%i' % (ww.wcs.spec+1)]
freqarr = wspec.wcs_pix2world(np.arange(nax),0)[0]
# Note that this leaves open the possibility of extracting incomplete
# cubes from the edges of the high/low cubes...
if freq*1e9 > freqarr.min() and freq*1e9 < freqarr.max():
extract_subcube(cubefilename,
os.path.join(outdir, 'APEX_{0}.fits').format(line),
linefreq=freq*u.GHz)
extract_subcube(cubefilename,
os.path.join(outdir, 'APEX_{0}_smooth.fits').format(line),
linefreq=freq*u.GHz, smooth=True)
if vsmooth:
extract_subcube(cubefilename,
os.path.join(outdir, 'APEX_{0}_vsmooth.fits').format(line),
linefreq=freq*u.GHz, smooth=True, vsmooth=True)
if integrate:
integrate_mask(os.path.join(outdir, 'APEX_{0}'.format(line)))
integrate_mask(os.path.join(outdir, 'APEX_{0}_smooth'.format(line)),
mask=h2copath+'APEX_H2CO_303_202_smooth_mask.fits')
integrate_mask(os.path.join(outdir, 'APEX_{0}'.format(line)),
mask=h2copath+'APEX_13CO_matched_H2CO_mask.fits',
maskpre='13co',
)
integrate_mask(os.path.join(outdir, 'APEX_{0}_smooth'.format(line)),
mask=h2copath+'APEX_13CO_matched_H2CO_smooth_mask.fits',
maskpre='13co',
)
else:
log.info("Skipping line {0}".format(line))
def do_everything(pca_clean={'2014':False, '2013':False, 'ao':False},
scanblsub={'2014':False, '2013':False, 'ao':False},
timewise_pca={'2014':True, '2013':False, 'ao':True},
mergefile2='APEX_H2CO_merge_high',
mergepath=mergepath, molpath=molpath, h2copath=h2copath):
make_high_mergecube(mergefile2=mergefile2, pca_clean=pca_clean,
scanblsub=scanblsub, timewise_pca=timewise_pca)
do_postprocessing(mergepath=mergepath, molpath=molpath, h2copath=h2copath)
extract_co_subcubes(mergepath=mergepath)
def do_postprocessing(molpath=molpath, mergepath=mergepath, h2copath=h2copath):
#make_low_mergecube() # there's only one really useful overlap region
#os.chdir(mergepath)
# vsmoothds is made here:
#os.system('./APEX_H2CO_merge_high_starlink_custom.sh')
#os.chdir('../')
# OLD: merge_prefix = 'APEX_H2CO_merge_high' # Oct 4, 2014
merge_prefix='APEX_H2CO_merge_high_plait_all'
do_extract_subcubes(outdir=molpath, frange=[218,219],
cubefilename=os.path.join(mergepath,
merge_prefix+".fits"),
lines=lines218)
# Because I really want to see SiO...
do_extract_subcubes(outdir=molpath,
lines={'SiO_54':217.10498},
merge_prefix='APEX_H2CO_2014_merge', suffix="")
compute_noise_high(prefix=mergepath+merge_prefix, pixrange=[700,900])
compute_noise_high(prefix=mergepath+merge_prefix+"_smooth", pixrange=[320,400])
#compute_noise_high(mergepath+merge_prefix+'_smooth',[203,272])
#compute_noise_high(mergepath+'APEX_H2CO_merge_high_vsmoothds',[203,272])
#compute_noise_high(mergepath+'APEX_H2CO_303_202_vsmooth',[75,100])
#compute_noise_low()
signal_to_noise_mask_cube(os.path.join(molpath,'APEX_H2CO_303_202'),
noise=fits.getdata(os.path.join(mergepath,
'APEX_H2CO_merge_high_plait_all_noise.fits')),
sigmacut=2,
grow=2,
mask_hc3n=False) # unfortunately, flagged out brick & Sgr A
signal_to_noise_mask_cube(molpath+'APEX_H2CO_303_202_smooth',
noise=fits.getdata(mergepath+'APEX_H2CO_merge_high_plait_all_smooth_noise.fits'),
sigmacut=3,
mask_hc3n=False)
signal_to_noise_mask_cube(os.path.join(molpath,'APEX_H2CO_321_220'),
noise=fits.getdata(os.path.join(mergepath,
'APEX_H2CO_merge_high_plait_all_noise.fits')),
sigmacut=2,
grow=2)
signal_to_noise_mask_cube(molpath+'APEX_H2CO_321_220_smooth',
noise=fits.getdata(mergepath+'APEX_H2CO_merge_high_plait_all_smooth_noise.fits'),
sigmacut=2)
integrate_mask(molpath+'APEX_H2CO_303_202',
mask=molpath+'APEX_H2CO_303_202_mask.fits')
integrate_mask(molpath+'APEX_H2CO_303_202_smooth',
mask=molpath+'APEX_H2CO_303_202_smooth_mask.fits')
integrate_mask(molpath+'APEX_H2CO_303_202',
mask=molpath+'APEX_H2CO_321_220_mask.fits',
maskpre='321')
integrate_mask(molpath+'APEX_H2CO_303_202_smooth',
mask=molpath+'APEX_H2CO_321_220_smooth_mask.fits',
maskpre='321')
for fn in glob.glob(os.path.join(mergepath,'APEX_H2CO_30*fits')):
try:
os.symlink(fn,
os.path.join(h2copath,os.path.split(fn)[-1]))
except OSError:
log.debug("Skipped file {0} because it exists".format(fn))
# Create a few integrated H2CO 303 maps
integrate_slices_high(molpath+'APEX_H2CO_303_202_snmasked')
# Use spectral_cube to do a bunch of integrations
# PATH SENSITIVE
# integrate_h2co_by_freq(mergepath+mergefile2+".fits")
# On second thought, let's not go to camelot
# (this function proved ineffective)
for line in lines218:
fn = mergepath+'APEX_{0}.fits'.format(line)
if os.path.exists(fn):
integrate_mask(molpath+'APEX_{0}'.format(line),
mask=molpath+'APEX_H2CO_303_202_mask.fits')
integrate_mask(molpath+'APEX_{0}'.format(line),
mask=molpath+'APEX_H2CO_321_220_mask.fits',
maskpre='321')
integrate_mask(molpath+'APEX_{0}_smooth'.format(line),
mask=molpath+'APEX_H2CO_303_202_smooth_mask.fits')
integrate_mask(molpath+'APEX_{0}_smooth'.format(line),
mask=molpath+'APEX_H2CO_321_220_smooth_mask.fits',
maskpre='321')
log.debug("Integrated masked file {0}".format(fn))
else:
log.debug("File {0} does not exist".format(fn))
for line in lines218:
if os.path.exists(molpath+'APEX_{0}.fits'.format(line)):
baseline_cube(molpath+'APEX_{0}.fits'.format(line),
maskfn=molpath+'APEX_H2CO_303_202_mask.fits',
order=7)
baseline_cube(molpath+'APEX_{0}_smooth.fits'.format(line),
maskfn=molpath+'APEX_H2CO_303_202_smooth_mask.fits',
order=7)
#compute_noise_high(molpath+'APEX_H2CO_303_202_bl',[350,400])
#compute_noise_high(molpath+'APEX_H2CO_303_202_smooth_bl',[175,200])
#compute_noise_high(molpath+'APEX_H2CO_303_202_vsmooth_bl',[80,100])
signal_to_noise_mask_cube(molpath+'APEX_H2CO_303_202_bl',
noise=fits.getdata(mergepath+'APEX_H2CO_merge_high_plait_all_noise.fits'),
grow=2,
sigmacut=2,
mask_hc3n=False)
signal_to_noise_mask_cube(molpath+'APEX_H2CO_303_202_smooth_bl',
noise=fits.getdata(mergepath+'APEX_H2CO_merge_high_plait_all_smooth_noise.fits'),
sigmacut=3,
mask_hc3n=False)
signal_to_noise_mask_cube(molpath+'APEX_H2CO_321_220_bl',
noise=fits.getdata(mergepath+'APEX_H2CO_merge_high_plait_all_noise.fits'),
sigmacut=2,
grow=2)
signal_to_noise_mask_cube(molpath+'APEX_H2CO_321_220_smooth_bl',
noise=fits.getdata(mergepath+'APEX_H2CO_merge_high_plait_all_noise.fits'),
sigmacut=2,
grow=2)
for line in lines218:
if os.path.exists(molpath+'APEX_{0}_bl.fits'.format(line)):
integrate_mask(molpath+'APEX_{0}_bl'.format(line),
mask=molpath+'APEX_H2CO_303_202_bl_mask.fits')
integrate_mask(molpath+'APEX_{0}_smooth_bl'.format(line),
mask=molpath+'APEX_H2CO_303_202_smooth_bl_mask.fits')
integrate_mask(molpath+'APEX_{0}_bl'.format(line),
mask=molpath+'APEX_H2CO_321_220_bl_mask.fits',
maskpre='321')
integrate_mask(molpath+'APEX_{0}_smooth_bl'.format(line),
mask=molpath+'APEX_H2CO_321_220_smooth_bl_mask.fits',
maskpre='321')
do_mask_ch3oh(dpath=molpath)
for fn in glob.glob(os.path.join(molpath,'APEX_H2CO_3*fits')):
try:
os.symlink(fn,
os.path.join(h2copath,os.path.split(fn)[-1]))
log.info("Linked file {0} to {1}".format(fn, h2copath))
except OSError:
log.debug("Skipped file {0} because it exists".format(fn))
# moved to analysis doratio(h2copath=h2copath)
# moved to analysis do_temperature(ratio=False, h2copath=h2copath)
def contsub_cube(cubefilename,):
cube = fits.open(cubefilename+'.fits', memmap=False)
cont = fits.getdata(cubefilename+'_continuum.fits')
data = cube[0].data
cube[0].data = data - cont
cube.writeto(cubefilename+'_sub.fits', clobber=True)
def neighborly_masking(cube, sigma=1, roll=2):
"""
Try masking 1-sigma points surrounded by 1-sigma points
"""
noise = cube.std(axis=0)
mcube = cube > (noise*sigma)
mcube[:2,:,:] = mcube[-2:,:,:] = False
mcube2 = (mcube.astype('int16') + np.roll(mcube, 1, axis=0) +
np.roll(mcube, 2, axis=0) + np.roll(mcube, -1, axis=0) +
np.roll(mcube, -2, axis=0))
mask = mcube2 >= 3
return mask
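# Illustrative, self-contained check of the neighbour-count logic above (not
# part of the pipeline; uses a fixed threshold instead of the per-pixel noise):
# a contiguous 3-channel feature survives, an isolated single-channel spike
# does not.
def _example_neighborly_masking():
    import numpy as np
    cube = np.zeros((12, 1, 1))
    cube[3:6, 0, 0] = 5.0   # contiguous "line" -> kept
    cube[8, 0, 0] = 5.0     # isolated spike -> rejected
    mcube = cube > 1.0
    mcube[:2, :, :] = mcube[-2:, :, :] = False
    mcube2 = (mcube.astype('int16') + np.roll(mcube, 1, axis=0) +
              np.roll(mcube, 2, axis=0) + np.roll(mcube, -1, axis=0) +
              np.roll(mcube, -2, axis=0))
    mask = mcube2 >= 3
    return mask[:, 0, 0]    # True only for channels 3-5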
def baseline_cube(cubefn, mask=None, maskfn=None, mask_level=None,
mask_level_sigma=None, order=5,
outfilename=None,
polyspline='poly', splinesampling=100):
"""
Baseline-subtract a data cube with polynomials or splines.
Can mask the cube first.
"""
from pyspeckit.cubes.cubes import baseline_cube
f = fits.open(cubefn)
cube = f[0].data
if mask is None:
if maskfn is not None:
mask = fits.getdata(maskfn).astype('bool')
if cube.shape != mask.shape:
raise ValueError("Cube and mask don't match.")
elif mask_level is not None:
mask = cube > mask_level
elif mask_level_sigma is not None:
mask = ((cube-cube.mean(axis=0)) >
(cube.std(axis=0)*mask_level_sigma))
t0 = time.time()
if polyspline == 'poly':
log.info("Baselining cube {0} with order {1}...".format(cubefn, order))
bc = baseline_cube(cube, polyorder=order, cubemask=mask)
elif polyspline == 'spline':
log.info("Baselining cube {0} with sample scale {1}...".format(cubefn,
splinesampling))
# Splines can't be pickled
bc = baseline_cube(cube, splineorder=order,
sampling=splinesampling, cubemask=mask,
numcores=1)
log.info("Baselining done ({0} seconds)".format(time.time()-t0))
f[0].data = bc
if outfilename is None:
outfilename = cubefn.replace(".fits","_bl.fits")
f.writeto(outfilename, clobber=True)
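# Hedged usage sketch for baseline_cube (file names below are placeholders,
# not actual data products from this pipeline):
def _example_baseline_cube_calls():
    # Polynomial fit per spectrum, ignoring channels inside the signal mask;
    # writes <cube>_bl.fits next to the input.
    baseline_cube('APEX_H2CO_303_202.fits',
                  maskfn='APEX_H2CO_303_202_mask.fits',
                  order=7)
    # Spline fit sampled every 100 channels, masking >5 sigma channels first.
    baseline_cube('APEX_H2CO_2014_merge_low.fits',
                  polyspline='spline', splinesampling=100,
                  mask_level_sigma=5)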
def do_everything_2013extrafreqs():
build_cube_2013(lowhigh='low',
scanblsub=False)
build_cube_2013(lowhigh='high',
scanblsub=False)
#raise NotImplementedError
#compute_noise_extras(lowhigh='low',pixrange=[0,4096])
#compute_noise_extras(lowhigh='high',pixrange=[0,4096])
def dopeaksn():
from FITS_tools import strip_headers
f = fits.open(h2copath+'APEX_H2CO_303_202.fits')
header = strip_headers.flatten_header(f[0].header)
f[0].header=header
f[0].data = f[0].data.max(axis=0)
n = fits.getdata(h2copath+'APEX_H2CO_merge_high_sub_noise.fits')
f[0].data /= n
f.writeto(h2copath+'APEX_H2CO_303_202_peaksn.fits',clobber=True)
f = fits.open(h2copath+'APEX_H2CO_303_202_smooth.fits')
header = strip_headers.flatten_header(f[0].header)
f[0].header=header
f[0].data = f[0].data.max(axis=0)
n = fits.getdata(h2copath+'APEX_H2CO_merge_high_smooth_noise.fits')
f[0].data /= n
f.writeto(h2copath+'APEX_H2CO_303_202_peaksn_smooth.fits',clobber=True)
def docleannhits():
""" not really used now """
f = fits.open(h2copath+'APEX_H2CO_merge_high_nhits.fits')
nh = f[0].data
nhm = scipy.ndimage.median_filter(nh, 5)
f[0].data = nhm
def mask_out_hc3n(maskhdu):
"""
HC3N shows up in some of the H2CO 303 cubes. We can exclude it from the
signal masks by shifting the mask to HC3N velocities and excluding any
regions detected in H2CO
"""
nu_hc3n = all_lines['HC3N_24-23']
nu_h2co = all_lines['H2CO_303_202']
v_hc3n = ((nu_hc3n - nu_h2co)/nu_h2co * constants.c).to(u.km/u.s).value
mask = maskhdu.data
dv = maskhdu.header['CDELT3']
shift = v_hc3n / dv
newmask = np.zeros_like(mask, dtype='bool')
    shift = int(np.round(np.abs(shift)))  # need an integer channel shift for slicing
    newmask[shift:, :, :] = mask[:-shift, :, :].astype('bool')
maskhdu.data[newmask] = 0
# Need to convert to int because fits doesn't support bool
maskhdu.data = maskhdu.data.astype('int')
return maskhdu
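# Worked example of the frequency-offset -> channel-shift arithmetic used
# above (rest frequencies are approximate round numbers; the real values come
# from the all_lines dictionary, and the channel width is an assumption):
def _example_hc3n_channel_shift():
    nu_hc3n = 218.32472   # GHz, HC3N 24-23 (approximate)
    nu_h2co = 218.22219   # GHz, H2CO 3(03)-2(02) (approximate)
    c_kms = 299792.458
    v_kms = (nu_hc3n - nu_h2co) / nu_h2co * c_kms   # ~ +141 km/s
    channel_width_kms = 1.0                         # assumed CDELT3
    return int(round(v_kms / channel_width_kms))    # ~ 141 channels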
def mask_out_ch3oh(smooth='_smooth', dpath=mergepath):
nu_ch3oh = all_lines['CH3OH_422_312']
nu_h2co = all_lines['H2CO_322_221']
v_ch3oh = ((nu_ch3oh - nu_h2co)/nu_h2co * constants.c).to(u.km/u.s).value
hdu = fits.open(dpath+'APEX_H2CO_322_221{0}.fits'.format(smooth))[0]
dv = hdu.header['CDELT3']
shift = v_ch3oh / dv
log.info("CH3OH Masking: dv: {0} shift: {1} ".format(dv,shift))
mask = fits.getdata(dpath+'APEX_H2CO_303_202{0}_mask.fits'.format(smooth)).astype('bool')
log.info("CH3OH Masking: Mask shape: {0}".format(mask.shape))
newmask = mask*False
log.info("CH3OH Masking: NewMask shape: {0}".format(newmask.shape))
    shift = int(np.round(np.abs(shift)))  # need an integer channel shift for slicing
    newmask[shift:, :, :] = mask[:-shift, :, :]
log.info("CH3OH Masking: NewMask number of masked pixels: {0}".format(newmask.sum()))
hdu.data[newmask] = np.nan
hdu.writeto(dpath+'APEX_H2CO_322_221{0}_CH3OHchomped.fits'.format(smooth), clobber=True)
    hdu.data[~mask] = np.nan  # blank everything outside the signal mask
hdu.writeto(dpath+'APEX_H2CO_322_221{0}_CH3OHchomped_masked.fits'.format(smooth), clobber=True)
integrate_mask(dpath+'APEX_H2CO_322_221{0}_CH3OHchomped'.format(smooth),
mask=dpath+'APEX_H2CO_303_202{0}_mask.fits'.format(smooth))
def do_mask_ch3oh(dpath=mergepath, vsmooth=False):
mask_out_ch3oh('', dpath=dpath)
# spatial smoothing = 2pix
mask_out_ch3oh('_smooth', dpath=dpath)
if vsmooth:
# spatial smoothing = 4pix
mask_out_ch3oh('_vsmooth', dpath=dpath)
mask_out_ch3oh('_bl', dpath=dpath)
# spatial smoothing = 2pix
mask_out_ch3oh('_smooth_bl', dpath=dpath)
if vsmooth:
# spatial smoothing = 4pix
mask_out_ch3oh('_vsmooth_bl', dpath=dpath)
def do_2014(datasets=datasets_2014, scanblsub=False):
#datasets = ['E-093.C-0144A.2014APR02/E-093.C-0144A-2014-2014-04-01',
# 'E-093.C-0144A.2014APR03/E-093.C-0144A-2014-2014-04-02']
#build_cube_2014('MAP_001', datasets=datasets, scanblsub=True, lowhigh='low')
#build_cube_2014('MAP_001', datasets=datasets, scanblsub=True, lowhigh='high')
#build_cube_2014('MAP_001', datasets=datasets, scanblsub=False, lowhigh='high_nosub')
for dataset in datasets:
for source in datasets[dataset]:
build_cube_2014(source, datasets=[dataset], scanblsub=scanblsub,
outpath=mergepath,
datapath=april2014path,
lowhigh='low')
build_cube_2014(source, datasets=[dataset], scanblsub=scanblsub,
outpath=mergepath,
datapath=april2014path,
lowhigh='high')
def do_2014_merge(datasets=datasets_2014,
lowhigh=('low','high')):
log.info("Starting merge")
if not isinstance(lowhigh, (tuple,list)):
if isinstance(lowhigh, str):
lowhigh = (lowhigh,)
else:
raise ValueError("Invalid lowhigh.")
for lh in lowhigh:
mergefile = 'APEX_H2CO_2014_merge_{0}'.format(lh)
log.info("Making blanks")
lowest_freq = 218.4e9 if lh=='high' else 216.9e9
make_blanks_merge(os.path.join(mergepath,mergefile), lowhigh=lh,
lowest_freq=lowest_freq, width=2.5*u.GHz)
mapnames = ['MAP_{0:03d}'.format(ii) for ii in range(1,130)]
log.info("Building cubes: "+str(mapnames)+" "+lh)
build_cube_2014(mapnames,
mergefile=mergefile,
outpath=mergepath,
datapath=april2014path,
lowhigh=lh,
datasets=datasets)
baseline_cube(os.path.join(mergepath,mergefile+".fits"),
polyspline='spline', mask_level_sigma=5)
def get_info_2014(datapath='/Users/adam/work/h2co/apex/april2014/',
datasets=datasets_2014):
info = {}
for dataset in datasets:
apex_filename=os.path.join(datapath,dataset)+".apex"
spectra,headers,indices = load_apex_cube(apex_filename)
info[dataset] = set([h['OBJECT'] for h in headers])
log.info("{0}:{1}".format(dataset, str(info[dataset])))
return info
def identify_scans_fromcoords(gal):
# identify where the *derivative* changes signs
# each np.diff shifts 1 to the left
# 2 np.diffs -> +2 to index
scans = 2+np.where(np.diff(np.sign(np.diff(gal.l.wrap_at(180*u.deg)))))[0]
return scans
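# Minimal numeric check of the turning-point logic above (plain arrays instead
# of an astropy Galactic coordinate object): the double np.diff flags the
# samples just after each turn-around of a zig-zag longitude track.
def _example_identify_scans():
    import numpy as np
    l = np.concatenate([np.linspace(0.0, 0.4, 5),
                        np.linspace(0.4, 0.0, 5)[1:],
                        np.linspace(0.0, 0.4, 5)[1:]])
    scans = 2 + np.where(np.diff(np.sign(np.diff(l))))[0]
    return scans    # array([5, 9]); the turn-arounds are at samples 4 and 8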
def per_scan_fourier_clean(data, scans, mask_pixels=None,
verbose=False, smoothing_width=10,
automask=False, smooth_all=False,
smoothing_kernel_size_scale=40,
nsigma_ignore=1, return_mask=False):
"""
An implementation of the Emerson 1988 prescription for "scan noise" removal
performed in "scan space" rather than map space.
Parameters
----------
data : np.ndarray
2D data, with time along axis 0 and frequency along axis 1
scans : np.ndarray
The endpoints of the scans. Should not include 0 or naxis
verbose : bool
Print out simple stats about the fits
"""
raise NotImplementedError("Work in progress - maybe a bad idea")
# Create a new array for hosting the subtracted data
dsub = data*0
timeaxis = 0
freqaxis = 1
# Kernel must be ODD
kernel_size = smoothing_kernel_size_scale * smoothing_width
if kernel_size % 2 == 0:
kernel_size += 1
masklist = []
for ii,jj in zip([0]+scans.tolist(),
scans.tolist()+[data.shape[timeaxis]]):
x = np.arange(jj-ii)
y = data[ii:jj,:]
fty = np.fft.fft(y,axis=0)
        ftf = np.fft.fftfreq(x.size)
# The components to suppress should be decided in the map plane...
return dsub
def subtract_scan_linear_fit(data, scans, mask_pixels=None,
verbose=False, smoothing_width=10,
automask=False, smooth_all=False,
smoothing_kernel_size_scale=40,
nsigma_ignore=1, return_mask=False):
"""
Use linear algebra to fit a time-baseline to each scan to remove spectral
baseline drifts.
WARNING: This may remove map-spanning signals!! That can be BAD for 13CO!
Source:
http://stackoverflow.com/questions/20343500/efficient-1d-linear-regression-for-each-element-of-3d-numpy-array
(includes a solution for masked arrays: this will be EXTREMELY useful!)
Parameters
----------
data : np.ndarray
2D data, with time along axis 0 and frequency along axis 1
scans : np.ndarray
The endpoints of the scans. Should not include 0 or naxis
divscale : bool
DISABLED: this is crazy
If True, will use only the slope and will divide out the normalized
slope rather than subtracting
mask_pixels : None or np.ndarray
A mask array to select pixels to interpolate the fits across in
the *Frequency* axis
automask : bool
Mask any scans with a mean > the overall mean + 1 stddev. The data are
slightly smoothed first if automask > 1.
verbose : bool
Print out simple stats about the fits
smoothing_kernel_size_scale : int
The size multiplier of the smoothing kernel used for interpolation in
the frequency domain; smoothing_kernel_size_scale * smoothing_width
defines the number of pixels to use when interpolating
nsigma_ignore : float
Fit masking control parameter. Pixels with values greater than the
mean noise + nsigma_ignore * std(mean_spectrum) will be ignored for
fitting then interpolated back over later
return_mask : bool
Return an array of the mask used for each scan
"""
#dmeans = data[:,percentile*data.shape[1]:(1-percentile)*data.shape[1]].mean(axis=1)
dsub = data*0
timeaxis = 0
freqaxis = 1
# Kernel must be ODD
kernel_size = smoothing_kernel_size_scale * smoothing_width
if kernel_size % 2 == 0:
kernel_size += 1
masklist = []
for ii,jj in zip([0]+scans.tolist(),
scans.tolist()+[data.shape[timeaxis]]):
x = np.arange(jj-ii)
if automask:
mean_spectrum = data[ii:jj,:].mean(axis=timeaxis)
if automask > 1:
mean_spectrum = convolve(mean_spectrum,
Gaussian1DKernel(stddev=automask))
mask_pixels = (mean_spectrum < (mean_spectrum.mean() +
nsigma_ignore*mean_spectrum.std()))
if verbose:
nflag = (~mask_pixels).sum()
log.info(("Masked {0} pixels for scanblsub fitting"
" in scan {1}-{2} "
"({3}%)").format(nflag, ii, jj,
nflag/float(mask_pixels.size),)
)
if mask_pixels is None:
y = data[ii:jj,:]
else:
# mask_pixels is an include mask
inds = np.arange(data.shape[freqaxis])[mask_pixels]
y = data[ii:jj,mask_pixels]
if return_mask and automask > 0:
masklist.append(mask_pixels)
# X is a vector of the X-values and a constant (1)
# Becomes set of equations y = m x + b || y = X mb
X = np.c_[x,np.ones(jj-ii)]
mb = np.linalg.lstsq(X,y)[0]
if mask_pixels is not None:
# Mask out the bad values, interpolate using a wide gaussian that
# ignores nans
m = np.zeros(data.shape[freqaxis]) + np.nan
m[inds] = mb[0,:]
m = convolve(m, Gaussian1DKernel(stddev=smoothing_width,
x_size=kernel_size))
b = np.zeros(data.shape[freqaxis]) + np.nan
b[inds] = mb[1,:]
b = convolve(b, Gaussian1DKernel(stddev=smoothing_width,
x_size=kernel_size))
# restore initial sampling unless we want smooth
if not smooth_all:
m[inds] = mb[0,:]
b[inds] = mb[1,:]
mb = np.array([m,b])
dsub[ii:jj,:] = data[ii:jj,:] - np.inner(X,mb.T)
log.info("Fit {0} scans with mean slopes {1} and offset {2}".format(len(scans)+1,
mb.mean(axis=1)[0],
mb.mean(axis=1)[1]))
if np.any(np.isnan(dsub)):
warnings.warn("There were NaNs left over from time-baseline subtraction.")
dsub[np.isnan(dsub)] = 0
if return_mask:
return dsub, np.array(masklist)
return dsub
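# Self-contained illustration of the per-scan fit above: for a single scan the
# model is y = m*t + b in every frequency channel, solved for all channels at
# once with one least-squares call, and the fitted drift is subtracted.
def _example_scan_linear_fit():
    import numpy as np
    ntime, nfreq = 50, 8
    rng = np.random.RandomState(0)
    t = np.arange(ntime)
    data = (0.03 * t)[:, None] + rng.randn(ntime, nfreq) * 0.1
    X = np.c_[t, np.ones(ntime)]                  # design matrix [t, 1]
    mb = np.linalg.lstsq(X, data, rcond=None)[0]  # (2, nfreq): slope, offset
    dsub = data - np.inner(X, mb.T)               # same subtraction as above
    return mb[0].mean(), dsub.std()               # slope ~0.03, residual ~0.1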
def efuncs(arr, neig=None, return_others=False, huge_limit=500):
"""
Determine eigenfunctions of an array for use with
PCA cleaning
Parameters
----------
arr : `numpy.ndarray`
The array (2D)
neig : None or int
The number of eigenvalues to compute. Smaller = faster!
None = All!
huge_limit : int
The limit above which an error will be raised (for large arrays, this
can take *forever*)
return_others : bool
Return the evals, evects, and covmat or just the efuncs?
Returns
-------
efuncarr : np.ndarray
The eigenfunctions
Optional Returns
----------------
covmat : np.ndarray
Symmetric covariance matrix
evals : np.ndarray
1D array of eigenvalues
evects : np.ndarray
Eigenvectors
"""
if hasattr(arr,'filled'):
arr = arr.filled(0)
if arr.shape[1] > huge_limit and not neig:
log.critical("Very large eigenvalue computation!"
" Danger stranger! Stranger danger!")
import ipdb; ipdb.set_trace()
covmat = np.dot(arr.T.conj(),arr)
# assert covariance matrix is Hermitian
# (symmetric under transpose + conjugation)
if not (covmat.T.conj() == covmat).all():
diff = (covmat.T.conj() - covmat)
worst_diff_ind = np.argmax(np.abs(diff))
worst_diff = diff.flat[worst_diff_ind]/covmat.flat[worst_diff_ind]
log.warning("There are differences between the upper "
"and lower triangular components of the "
"covariance matrix; this is probably a "
"floating point error and should not be terrible."
" The relative error is {wd}.".format(wd=worst_diff))
if np.abs(worst_diff) > 1e-4:
log.warning("Actually, that's a pretty large error. "
"You may be in trouble.")
# Changed from np.linalg.eig to scipy.linalg.eigh
# and numpy.linalg.eigh, which both return values in
# the opposite order from np.linalg.eig
if neig:
sz = covmat.shape[1]
eva, eve = scipy.linalg.eigh(covmat,
eigvals=(sz-neig,sz-1))
        # eigh returns values in opposite order from np.linalg.eig
# we also want a fully populated matrix so the size stays
# the same
inds = np.argsort(eva)[::-1]
evals = np.zeros(sz)
evals[:neig] = eva[inds]
evects = np.zeros([sz,sz])
evects[:, :neig] = eve[:,inds]
else:
evals,evects = np.linalg.eigh(covmat)
inds = np.argsort(evals)[::-1]
evals = evals[inds]
evects = evects[:,inds]
efuncarr = np.dot(arr,evects)
if return_others:
return efuncarr,covmat,evals,evects
else:
return efuncarr
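# Conceptual, numpy-only check of what efuncs() computes: for a toy two-channel
# dataset with one shared component, almost all of the covariance eigenvalue
# weight ends up in the first eigenfunction.
def _example_efuncs_concept():
    import numpy as np
    rng = np.random.RandomState(1)
    common = rng.randn(200)
    arr = np.vstack([common + 0.05 * rng.randn(200),
                     common + 0.05 * rng.randn(200)]).T   # shape (200, 2)
    covmat = np.dot(arr.T.conj(), arr)
    evals, evects = np.linalg.eigh(covmat)
    order = np.argsort(evals)[::-1]          # descending, as in efuncs()
    evals, evects = evals[order], evects[:, order]
    efuncarr = np.dot(arr, evects)
    return evals[0] / evals.sum(), efuncarr.shape   # ~1.0, (200, 2)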
def PCA_clean(data,
smoothing_scale=25., # should be ~200 for SEDIGISM
timeaxis=0,
freqaxis=1,
ncomponents=3,
diagplotfilename=None,
scans=None,
maxntimes=5000,
):
"""
Remove N PCA components in the time direction
TODO: speed up by downsampling in TIME as well; we don't expect large
    second-to-second variations.  REVISE: No, actually, there are sharp
jumps in time.
Maybe scan-by-scan pca is faster?
Smoothing scale is ~200 in total, which means 25 for pre-downsampled
CMZ data
Parameters
----------
data : `numpy.ndarray`
2D data, with dimensions ``[times, frequencies]`` (or reversed if
``timeaxis`` and ``freqaxis`` are appropriately specified)
smoothing_scale : float
The scale over which frequencies should be smoothed prior to performing
the PCA analysis. This is the width of a gaussian. The data will be
downsampled by a factor (1/5)*smoothing_scale
timeaxis : int
freqaxis : int
The axis #'s of the frequency and time data
ncomponents : int
The number of PCA components to remove. 3 is empirically decent, but
it's very important to test this #
diagplotfilename : None or str
A filename to save a diagnostic plot in. The plot shows the first
``ncomponents`` eigenfunctions.
scans : list
A list of scans. If these are specified, the PCA analysis will be done
on a scan-by-scan basis, in which the most-correlated N components will
be identified in each scan. This is not obviously the best thing to
do, but it can be useful.
maxntimes : int or None
If specified, the timestream will be chunked out into sections with
length < maxntimes before doing PCA computations. In principle, this
can be used to overcome memory limitations, but it should be used with
caution as the locations of the splits are somewhat arbitrary and could
        result in different principal component selections if the data aren't
well-behaved.
"""
if freqaxis == 0 and timeaxis == 1:
data = data.swapaxes(0,1)
elif freqaxis != 1 or timeaxis != 0:
raise ValueError("Invalid axis specification.")
if np.any(np.isnan(data)):
warnings.warn("There were NaNs in the PCA-target data")
import ipdb; ipdb.set_trace()
data = np.nan_to_num(data)
if maxntimes and scans is None:
ntimes = data.shape[0]
if ntimes > maxntimes:
nsplits = np.ceil(ntimes/float(maxntimes))
length = ntimes/nsplits
# Split with equal length, but leave out the starting point
# and the end point since those are both added
splits = np.linspace(0, ntimes, nsplits+1)[1:-1]
scans = splits.astype('int')
if scans is not None:
all_data = data
all_dsub = np.empty(data.shape)
for start,end in zip([0]+scans.tolist(),
scans.tolist()+[data.shape[0]]):
log.info("Computing PCA on an array with shape"
" {0}".format(data[start:end,:].shape))
dsub,efuncarr = PCA_subtract(data[start:end,:],
smoothing_scale=smoothing_scale,
ncomponents=ncomponents)
if start == 0:
efuncs = efuncarr[:,:ncomponents]
else:
efuncs += efuncarr[:,:ncomponents]
all_dsub[start:end,:] = dsub
dsub = all_dsub
efuncarr = efuncs / (len(scans)+1.) # Average removed efuncs
else:
log.info("Computing PCA on an array with shape"
" {0}".format(data.shape))
dsub,efuncarr = PCA_subtract(data,
smoothing_scale=smoothing_scale,
ncomponents=ncomponents)
if diagplotfilename is not None:
fig = pl.figure(4)
fig.clf()
ax = fig.gca()
for ii in range(ncomponents):
ax.plot(efuncarr[:,ii], label=str(ii), linewidth=2, alpha=0.5)
ax.legend(loc='best')
checkdir_makedir(diagplotfilename)
fig.savefig(diagplotfilename, bbox_inches='tight')
if freqaxis == 0 and timeaxis == 1:
dsub = dsub.swapaxes(0,1)
return dsub.real
def PCA_subtract(data, smoothing_scale=None, ncomponents=3):
"""
Parameters
----------
data : `numpy.ndarray`
2D data, with dimensions (times, frequencies)
smoothing_scale : float
The scale over which frequencies should be smoothed prior to performing
the PCA analysis. This is the width of a gaussian. The data will be
downsampled by a factor (1/5)*smoothing_scale
Returns
-------
dsub : `numpy.ndarray`
        The data with ``ncomponents`` principal components removed
efuncarr :
"""
t0 = time.time()
log.info("PCA will remove {0} components".format(ncomponents))
if smoothing_scale:
log.info(("PCA cleaning an image with size {0},"
" which will downsample to {1}").format(data.shape,
(data.shape[0],
data.shape[1]/(smoothing_scale/5))))
sm_data = filters.gaussian_filter1d(data, smoothing_scale,
axis=1, mode='mirror').real
efuncarr,covmat,evals,evects = efuncs(sm_data[:,::smoothing_scale/5].T,
neig=ncomponents,
huge_limit=1000,
return_others=True)
else:
log.info("PCA cleaning an image with size {0}".format(data.shape))
efuncarr,covmat,evals,evects = efuncs(data.T,
neig=ncomponents,
huge_limit=1000,
return_others=True)
log.info("Completed PCA (eigenfunction/vector) computation"
" in {0} seconds.".format(time.time()-t0))
    # Zero out everything beyond the first ``ncomponents`` eigenfunctions so
    # that only the strongest components enter the model that gets subtracted
# (technically no longer necessary: this should be a null operation)
efuncarr[:,ncomponents:] = 0
to_subtract = np.inner(efuncarr,evects).T
if smoothing_scale:
ifunc = interpolate.interp1d(np.arange(to_subtract.shape[1]),
to_subtract,
axis=1)
to_subtract = ifunc(np.linspace(0, to_subtract.shape[1]-1, data.shape[1]))
dsub = data - to_subtract
return dsub, efuncarr
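# Conceptual sketch of the subtraction step above (numpy-only; it mirrors the
# idea rather than the exact production code path): project out the single
# strongest correlated component from spectra that share a baseline wiggle.
def _example_pca_subtract_concept():
    import numpy as np
    rng = np.random.RandomState(2)
    ntime, nfreq = 100, 64
    wiggle = np.sin(np.linspace(0, 6 * np.pi, nfreq))       # shared structure
    data = rng.randn(ntime)[:, None] * wiggle[None, :] + 0.05 * rng.randn(ntime, nfreq)
    covmat = np.dot(data.T, data)                            # (nfreq, nfreq)
    evals, evects = np.linalg.eigh(covmat)
    top = evects[:, np.argmax(evals)]                        # strongest component
    dsub = data - np.outer(np.dot(data, top), top)           # remove its projection
    return data.std(), dsub.std()                            # residual ~0.05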
def _is_sci(source, sourcereg='MAP'):
return (((sourcereg in source)) and
('SKY' not in source) and
('TCAL' not in source) and
('TREC' not in source) and
('TSYS' not in source) and
('HOT' not in source) and
('COLD' not in source))
def get_source_tel_line(apex_filename):
if 'M-093' in apex_filename or 'E-093' in apex_filename:
sourcereg = 'MAP'
line = 'shfi219ghz'
telescopes = ['AP-H201-X201', 'AP-H201-X202']
elif 'M-091' in apex_filename:
sourcereg = 'SGRA'
line = 'H2CO(3-2)'
telescopes = ['AP-H201-X201', 'AP-H201-X202']
elif 'O-085' in apex_filename:
sourcereg = 'SGRA'
line = 'H2CO(3-2)'
telescopes = ['AP-H201-F101', 'AP-H201-F102']
elif 'E-085' in apex_filename:
sourcereg = 'SGRA'
line = 'H2CO32'
telescopes = ['AP-H201-F101', 'AP-H201-F102']
else:
raise ValueError("Data selected is not from ao, 2013 or 2014")
return sourcereg,line,telescopes
def compute_and_save_pca_components(apex_filename, ncomponents=5,
suppress_endpoints=4, redo=True):
log.info("Starting {0}".format(apex_filename))
outdir = os.path.join(os.path.dirname(apex_filename),
os.path.splitext(os.path.basename(apex_filename))[0])
if not os.path.isdir(outdir):
os.mkdir(outdir)
sourcereg,line,telescopes = get_source_tel_line(apex_filename)
if not redo and all([os.path.exists(
os.path.join(outdir,
'{1}_pca_component_{0}_els0.fits'.
format(ii,tel)))
for ii in range(ncomponents)
for tel in telescopes]):
log.info("Skipping {0} because it's been done".format(apex_filename))
return
log.info("Outdir is {0}".format(outdir))
cl = read_class.ClassObject(apex_filename)
for telescope in cl.getinfo()['tels']:
if 'PA' not in telescope:
selection = [x
for source in cl.sources
if _is_sci(source, sourcereg)
for x in cl.select_spectra(telescope=telescope,
line=line,
source=source)]
mmdata,headers = zip(*cl.read_observations(selection, progressbar=True))
log.info("Converting data to an array by every 1000 elts"
" out of {0} total (memory use should rise here)".
format(len(mmdata)))
            for jj in range(len(mmdata) // 1000 + 1):
log.info('Elements {0}-{1}'.format(jj*1000,
min((jj+1)*1000,
len(mmdata))))
data = np.asarray(mmdata[jj*1000:(jj+1)*1000])
# Endpoints can be ~1e14
bad = abs(data) > 1e9
nbad = np.count_nonzero(bad)
if nbad > 0:
log.info("Found {0} bad values".format(nbad))
data[bad] = 0
log.info('Computing eigenfunctions (intensive step)')
efuncarr,covmat,evals,evects = efuncs(data.T,
neig=ncomponents,
huge_limit=1000,
return_others=True)
log.info("Writing PCA components to disk. This step should be fast.")
header = classheader_to_fitsheader(headers[0])
evals_norm = evals/evals.sum()
for ii in range(ncomponents):
header['PCACOMP'] = ii
header['EVAL'] = evals_norm[ii]
hdu = fits.PrimaryHDU(data=efuncarr[:,ii], header=header)
hdu.writeto(os.path.join(outdir,
'{2}_pca_component_{0}_els{1}.fits'.
format(ii,jj,telescope)),
clobber=True,
output_verify='fix')
# Re-do the correlations using those PCA components
log.info("Re-computing PCA using the sub-components.")
data = np.array([fits.getdata(os.path.join(outdir,
'{2}_pca_component_{0}_els{1}.fits'.
format(ii,jj,telescope)))
for ii in range(ncomponents)
                             for jj in range(len(mmdata) // 1000 + 1)])
efuncarr,covmat,evals,evects = efuncs(data.T,
neig=ncomponents,
huge_limit=1000,
return_others=True)
evals_norm = evals/evals.sum()
for ii in range(ncomponents):
header['PCACOMP'] = ii
header['EVAL'] = evals_norm[ii]
hdu = fits.PrimaryHDU(data=efuncarr[:,ii], header=header)
hdu.writeto(os.path.join(outdir,
'{1}_pca_component_{0}.fits'.
format(ii,telescope)),
clobber=True,
output_verify='fix')
log.info("Completed {0}".format(apex_filename))
def do_all_pcacomponents(redo=True, **kwargs):
for fn in all_apexfiles:
try:
compute_and_save_pca_components(fn, redo=redo, **kwargs)
plot_pca_components(fn)
except Exception as ex:
log.error("Error: {0}".format(ex))
print(ex)
continue
def plot_pca_components(apex_filename, ncomponents=3):
log.info("Plotting {0}".format(apex_filename))
outdir = os.path.join(os.path.dirname(apex_filename),
os.path.splitext(os.path.basename(apex_filename))[0])
fig1 = pl.figure(1)
fig1.clf()
fig2 = pl.figure(2)
fig2.clf()
figs = [fig1,fig2]
for fglob in [os.path.join(outdir, '*_pca_component_{0}.fits'.format(ii))
for ii in range(ncomponents)]:
files = glob.glob(fglob)
for jj,(fn,fig) in enumerate(zip(files,figs)):
data = fits.getdata(fn)
ax1 = fig.add_subplot(2,1,1)
ax1.plot(data, ',', label=str(jj))
ft = np.fft.fft(data)
ftf = np.fft.fftfreq(data.size)
ax2 = fig.add_subplot(2,1,2)
ax2.loglog(ftf[ftf>=0], abs(ft[ftf>=0]), label=str(jj), alpha=0.5)
fig1.savefig(files[0].replace(".fits",".png"))
fig2.savefig(files[1].replace(".fits",".png"))
log.info("Done plotting {0}".format(apex_filename))
def extract_mean_abs_spectra(apex_filename):
outdir = os.path.join(os.path.dirname(apex_filename),
os.path.splitext(os.path.basename(apex_filename))[0])
if not os.path.isdir(outdir):
os.mkdir(outdir)
sourcereg,line,telescopes = get_source_tel_line(apex_filename)
cl = read_class.ClassObject(apex_filename)
for telescope in cl.getinfo()['tels']:
if 'PA' not in telescope:
selection = [x
for source in cl.sources
if _is_sci(source, sourcereg)
for x in cl.select_spectra(telescope=telescope,
line=line,
source=source)]
# Only do first 10000
# 1e4 * 2**15 * 4 = 1.31 GB
mmdata,headers = zip(*cl.read_observations(selection[:10000], progressbar=True))
header = classheader_to_fitsheader(headers[0])
header['LINE1'] = 'mean(abs)'
header['LINE2'] = 'std(abs)'
del headers
data = np.abs(np.array(mmdata, dtype='float32'))
del mmdata
dft = np.fft.fft(data, axis=1)
dftmeanabs = np.abs(dft).mean(axis=0).astype('float32')
del dft
absdata = np.abs(data).astype('float32')
del data
meanabs = (absdata).mean(axis=0).astype('float32')
stdabs = (absdata).std(axis=0).astype('float32')
darr = np.array([meanabs,stdabs,dftmeanabs])
assert darr.shape == (3, meanabs.size)
hdu = fits.PrimaryHDU(data=darr, header=header)
hdu.writeto(os.path.join(outdir,
'{0}_meanabsspec.fits'.format(telescope)),
clobber=True,
output_verify='fix')
def plot_mean_abs_spectrum(apex_filename, ncomponents=3):
log.info("Plotting {0}".format(apex_filename))
basename = os.path.splitext(os.path.basename(apex_filename))[0]
outdir = os.path.join(os.path.dirname(apex_filename), basename)
fig1 = pl.figure(1)
fig1.clf()
pl.title(basename)
fig2 = pl.figure(2)
fig2.clf()
figs = [fig1,fig2]
fglob = os.path.join(outdir, '*_meanabsspec.fits')
files = glob.glob(fglob)
for jj,(fn,fig) in enumerate(zip(files,figs)):
mspec, sspec, ftabs = fits.getdata(fn)
ax1 = fig.add_subplot(2,1,1)
ax1.plot(mspec-np.median(mspec), ',', label=str(jj))
mmad = mad_std(mspec)
ax1.set_ylim(mmad*-10, mmad*10)
ax1.set_title(basename)
ft = np.fft.fft(mspec)
ftf = np.fft.fftfreq(mspec.size)
ax2 = fig.add_subplot(2,1,2)
ax2.loglog(ftf[ftf>=0], abs(ft[ftf>=0]), label=str(jj), alpha=0.5)
ax2.loglog(ftf[ftf>=0], abs(ftabs[ftf>=0]), alpha=0.5)
ax2.set_xlim(ftf.min(), ftf.max())
fig.savefig(fn.replace(".fits",".png"), bbox_inches='tight')
log.info("Done plotting {0}".format(apex_filename))
def do_all_meanabsspectra(**kwargs):
for fn in all_apexfiles:
extract_mean_abs_spectra(fn, **kwargs)
plot_mean_abs_spectrum(fn)
#except Exception as ex:
# log.error("Error: {0}".format(ex))
# print(ex)
# continue
def extract_co_subcubes(mergepath=april2014path):
extract_subcube(os.path.join(mergepath,'APEX_H2CO_2014_merge_high.fits'),
os.path.join(mergepath,'APEX_13CO_2014_merge.fits'),
linefreq=220.39868*u.GHz, naxis3=500, vmin=-225*u.km/u.s,
vmax=275*u.km/u.s)
extract_subcube(os.path.join(mergepath,'APEX_H2CO_2014_merge_high.fits'),
os.path.join(mergepath,'APEX_C18O_2014_merge.fits'),
linefreq=219.56036*u.GHz, naxis3=500, vmin=-225*u.km/u.s,
vmax=275*u.km/u.s)
extract_subcube(os.path.join(mergepath,'APEX_H2CO_2014_merge_high.fits'),
os.path.join(h2copath,'APEX_13CO_matched_H2CO.fits'),
linefreq=220.39868*u.GHz,)
extract_subcube(os.path.join(mergepath,'APEX_H2CO_2014_merge_high.fits'),
os.path.join(h2copath,'APEX_C18O_matched_H2CO.fits'),
linefreq=219.56036*u.GHz,)
extract_subcube(os.path.join(mergepath,'APEX_H2CO_2014_merge_high.fits'),
os.path.join(h2copath,'APEX_13CO_matched_H2CO_smooth.fits'),
linefreq=220.39868*u.GHz, smooth=True)
extract_subcube(os.path.join(mergepath,'APEX_H2CO_2014_merge_high.fits'),
os.path.join(h2copath,'APEX_C18O_matched_H2CO_smooth.fits'),
linefreq=219.56036*u.GHz, smooth=True)
signal_to_noise_mask_cube(os.path.join(h2copath,'APEX_13CO_matched_H2CO_smooth'),
noise=fits.getdata(os.path.join(mergepath,
'APEX_H2CO_merge_high_plait_all_noise.fits')))
signal_to_noise_mask_cube(os.path.join(h2copath,'APEX_13CO_matched_H2CO'),
noise=fits.getdata(os.path.join(mergepath,
'APEX_H2CO_merge_high_plait_all_smooth_noise.fits')))
def quick_extract_13cocube(fn, snthreshold=3, overwrite=True, intrange=None):
if fits.getheader(fn)['NAXIS'] > 2:
cube = SpectralCube.read(fn).with_spectral_unit(u.km/u.s,
rest_value=220.39868*u.GHz,
velocity_convention='radio')
cocube = cube.spectral_slab(-200*u.km/u.s, 200*u.km/u.s)
cocube.write(fn[:-5]+"_13COcube.fits", overwrite=overwrite)
noise = cube.std(axis=0)
noise.hdu.writeto(fn[:-5]+"_noise.fits", clobber=overwrite)
sn = cocube.filled_data[:]/noise
comask = cocube.with_mask(BooleanArrayMask(sn > snthreshold, wcs=cocube._wcs))
if intrange is None:
coint = comask.moment0()
else:
coint = comask.spectral_slab(intrange[0], intrange[1]).moment0()
coint.write(fn[:-5]+"_13COmaskintegrated.fits", overwrite=overwrite)
coint2 = cocube.spectral_slab(intrange[0], intrange[1]).moment0()
coint2.write(fn[:-5]+"_13COintegrated.fits", overwrite=overwrite)
def cal_date_overlap(dates1, calibration_factors=calibration_factors):
for k in calibration_factors:
if k is not None:
d1,d2 = Time(k.split(":"))
if dates1[0] < d2 and dates1[1] > d1:
return k
| bsd-3-clause |
gVallverdu/pymatgen | pymatgen/analysis/magnetism/heisenberg.py | 2 | 37618 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a simple algorithm for extracting nearest neighbor
exchange parameters by mapping low energy magnetic orderings to a Heisenberg
model.
"""
import logging
import sys
from ast import literal_eval
import copy
import numpy as np
import pandas as pd
from monty.serialization import dumpfn
from monty.json import MSONable, jsanitize
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen import Structure
__author__ = "ncfrey"
__version__ = "0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "June 2019"
class HeisenbergMapper:
"""
Class to compute exchange parameters from low energy magnetic orderings.
"""
def __init__(self, ordered_structures, energies, cutoff=0.0, tol=0.02):
"""
Exchange parameters are computed by mapping to a classical Heisenberg
model. Strategy is the scheme for generating neighbors. Currently only
MinimumDistanceNN is implemented.
n+1 unique orderings are required to compute n exchange
parameters.
First run a MagneticOrderingsWF to obtain low energy collinear magnetic
orderings and find the magnetic ground state. Then enumerate magnetic
states with the ground state as the input structure, find the subset
of supercells that map to the ground state, and do static calculations
for these orderings.
Args:
ordered_structures (list): Structure objects with magmoms.
energies (list): Total energies of each relaxed magnetic structure.
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
Defaults to 0 (only NN, no NNN, etc.)
tol (float): Tolerance (in Angstrom) on nearest neighbor distances
being equal.
Parameters:
strategy (object): Class from pymatgen.analysis.local_env for
constructing graphs.
sgraphs (list): StructureGraph objects.
unique_site_ids (dict): Maps each site to its unique numerical
identifier.
wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
position.
            nn_interactions (dict): {i: j} pairs of NN interactions
between unique sites.
dists (dict): NN, NNN, and NNNN interaction distances
ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
graph.
ex_params (dict): Exchange parameter values (meV/atom)
"""
# Save original copies of inputs
self.ordered_structures_ = ordered_structures
self.energies_ = energies
# Sanitize inputs and optionally order them by energy / magnetic moments
hs = HeisenbergScreener(ordered_structures, energies, screen=False)
ordered_structures = hs.screened_structures
energies = hs.screened_energies
self.ordered_structures = ordered_structures
self.energies = energies
self.cutoff = cutoff
self.tol = tol
# Get graph representations
self.sgraphs = self._get_graphs(cutoff, ordered_structures)
# Get unique site ids and wyckoff symbols
self.unique_site_ids, self.wyckoff_ids = self._get_unique_sites(
ordered_structures[0]
)
# These attributes are set by internal methods
self.nn_interactions = None
self.dists = None
self.ex_mat = None
self.ex_params = None
# Check how many commensurate graphs we found
if len(self.sgraphs) < 2:
print("We need at least 2 unique orderings.")
sys.exit(1)
else: # Set attributes
self._get_nn_dict()
self._get_exchange_df()
@staticmethod
def _get_graphs(cutoff, ordered_structures):
"""
Generate graph representations of magnetic structures with nearest
neighbor bonds. Right now this only works for MinimumDistanceNN.
Args:
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
ordered_structures (list): Structure objects.
Returns:
sgraphs (list): StructureGraph objects.
"""
# Strategy for finding neighbors
if cutoff:
strategy = MinimumDistanceNN(cutoff=cutoff, get_all_sites=True)
else:
strategy = MinimumDistanceNN() # only NN
# Generate structure graphs
sgraphs = [
StructureGraph.with_local_env_strategy(s, strategy=strategy)
for s in ordered_structures
]
return sgraphs
@staticmethod
def _get_unique_sites(structure):
"""
Get dict that maps site indices to unique identifiers.
Args:
structure (Structure): ground state Structure object.
Returns:
unique_site_ids (dict): maps tuples of equivalent site indices to a
unique int identifier
wyckoff_ids (dict): maps tuples of equivalent site indices to their
wyckoff symbols
"""
# Get a nonmagnetic representation of the supercell geometry
s0 = CollinearMagneticStructureAnalyzer(
structure, make_primitive=False, threshold=0.0
).get_nonmagnetic_structure(make_primitive=False)
# Get unique sites and wyckoff positions
if "wyckoff" in s0.site_properties:
s0.remove_site_property("wyckoff")
symm_s0 = SpacegroupAnalyzer(s0).get_symmetrized_structure()
wyckoff = ["n/a"] * len(symm_s0)
equivalent_indices = symm_s0.equivalent_indices
wyckoff_symbols = symm_s0.wyckoff_symbols
# Construct dictionaries that map sites to numerical and wyckoff
# identifiers
unique_site_ids = {}
wyckoff_ids = {}
i = 0
for indices, symbol in zip(equivalent_indices, wyckoff_symbols):
unique_site_ids[tuple(indices)] = i
wyckoff_ids[i] = symbol
i += 1
for index in indices:
wyckoff[index] = symbol
return unique_site_ids, wyckoff_ids
def _get_nn_dict(self):
"""Get dict of unique nearest neighbor interactions.
Returns:
None: (sets self.nn_interactions and self.dists instance variables)
"""
tol = self.tol # tolerance on NN distances
sgraph = self.sgraphs[0]
unique_site_ids = self.unique_site_ids
nn_dict = {}
nnn_dict = {}
nnnn_dict = {}
all_dists = []
# Loop over unique sites and get neighbor distances up to NNNN
for k in unique_site_ids:
i = k[0]
i_key = unique_site_ids[k]
connected_sites = sgraph.get_connected_sites(i)
dists = [round(cs[-1], 2) for cs in connected_sites] # i<->j distances
dists = sorted(list(set(dists))) # NN, NNN, NNNN, etc.
dists = dists[:3] # keep up to NNNN
all_dists += dists
# Keep only up to NNNN and call dists equal if they are within tol
all_dists = sorted(list(set(all_dists)))
rm_list = []
for idx, d in enumerate(all_dists[:-1]):
if abs(d - all_dists[idx + 1]) < tol:
rm_list.append(idx + 1)
all_dists = [d for idx, d in enumerate(all_dists) if idx not in rm_list]
if len(all_dists) < 3: # pad with zeros
all_dists += [0.0] * (3 - len(all_dists))
all_dists = all_dists[:3]
labels = ["nn", "nnn", "nnnn"]
dists = {l: d for (l, d) in zip(labels, all_dists)}
# Get dictionary keys for interactions
for k in unique_site_ids:
i = k[0]
i_key = unique_site_ids[k]
connected_sites = sgraph.get_connected_sites(i)
# Loop over sites and determine unique NN, NNN, etc. interactions
for cs in connected_sites:
dist = round(cs[-1], 2) # i_j distance
j = cs[2] # j index
for key in unique_site_ids.keys():
if j in key:
j_key = unique_site_ids[key]
if abs(dist - dists["nn"]) <= tol:
nn_dict[i_key] = j_key
elif abs(dist - dists["nnn"]) <= tol:
nnn_dict[i_key] = j_key
elif abs(dist - dists["nnnn"]) <= tol:
nnnn_dict[i_key] = j_key
nn_interactions = {"nn": nn_dict, "nnn": nnn_dict, "nnnn": nnnn_dict}
self.dists = dists
self.nn_interactions = nn_interactions
def _get_exchange_df(self):
"""
Loop over all sites in a graph and count the number and types of
nearest neighbor interactions, computing +-|S_i . S_j| to construct
a Heisenberg Hamiltonian for each graph.
Returns:
None: (sets self.ex_mat instance variable)
TODO:
* Deal with large variance in |S| across configs
"""
sgraphs = self.sgraphs
tol = self.tol
unique_site_ids = self.unique_site_ids
nn_interactions = self.nn_interactions
dists = self.dists
# Get |site magmoms| from FM ordering so that S_i and S_j are consistent?
        # Large variations in |S| across configs throw this off
# fm_struct = self.get_low_energy_orderings()[0]
# Total energy and nonmagnetic energy contribution
columns = ["E", "E0"]
# Get labels of unique NN interactions
for k0, v0 in nn_interactions.items():
for i, j in v0.items(): # i and j indices
c = str(i) + "-" + str(j) + "-" + str(k0)
c_rev = str(j) + "-" + str(i) + "-" + str(k0)
if c not in columns and c_rev not in columns:
columns.append(c)
num_sgraphs = len(sgraphs)
# Keep n interactions (not counting 'E') for n+1 structure graphs
columns = columns[: num_sgraphs + 1]
num_nn_j = len(columns) - 1 # ignore total energy
j_columns = [name for name in columns if name not in ["E", "E0"]]
ex_mat_empty = pd.DataFrame(columns=columns)
ex_mat = ex_mat_empty.copy()
if len(j_columns) < 2:
self.ex_mat = ex_mat # Only <J> can be calculated here
else:
sgraphs_copy = copy.deepcopy(sgraphs)
sgraph_index = 0
# Loop over all sites in each graph and compute |S_i . S_j|
# for n+1 unique graphs to compute n exchange params
for graph in sgraphs:
sgraph = sgraphs_copy.pop(0)
ex_row = pd.DataFrame(
np.zeros((1, num_nn_j + 1)), index=[sgraph_index], columns=columns
)
for i, node in enumerate(sgraph.graph.nodes):
# s_i_sign = np.sign(sgraph.structure.site_properties['magmom'][i])
s_i = sgraph.structure.site_properties["magmom"][i]
for k in unique_site_ids.keys():
if i in k:
i_index = unique_site_ids[k]
# Get all connections for ith site and compute |S_i . S_j|
connections = sgraph.get_connected_sites(i)
# dists = [round(cs[-1], 2) for cs in connections] # i<->j distances
# dists = sorted(list(set(dists))) # NN, NNN, NNNN, etc.
for j, connection in enumerate(connections):
j_site = connection[2]
dist = round(connection[-1], 2) # i_j distance
# s_j_sign = np.sign(sgraph.structure.site_properties['magmom'][j_site])
s_j = sgraph.structure.site_properties["magmom"][j_site]
for k in unique_site_ids.keys():
if j_site in k:
j_index = unique_site_ids[k]
# Determine order of connection
if abs(dist - dists["nn"]) <= tol:
order = "-nn"
elif abs(dist - dists["nnn"]) <= tol:
order = "-nnn"
elif abs(dist - dists["nnnn"]) <= tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in ex_mat.columns:
ex_row.at[sgraph_index, j_ij] -= s_i * s_j
elif j_ji in ex_mat.columns:
ex_row.at[sgraph_index, j_ji] -= s_i * s_j
# Ignore the row if it is a duplicate to avoid singular matrix
if ex_mat.append(ex_row)[j_columns].equals(
ex_mat.append(ex_row)[j_columns].drop_duplicates(keep="first")
):
e_index = self.ordered_structures.index(sgraph.structure)
ex_row.at[sgraph_index, "E"] = self.energies[e_index]
sgraph_index += 1
ex_mat = ex_mat.append(ex_row)
# if sgraph_index == num_nn_j: # check for zero columns
# zeros = [b for b in (ex_mat[j_columns] == 0).all(axis=0)]
# if True in zeros:
# sgraph_index -= 1 # keep looking
ex_mat[j_columns] = ex_mat[j_columns].div(
2.0
) # 1/2 factor in Heisenberg Hamiltonian
ex_mat[["E0"]] = 1 # Nonmagnetic contribution
# Check for singularities and delete columns with all zeros
zeros = [b for b in (ex_mat == 0).all(axis=0)]
if True in zeros:
c = ex_mat.columns[zeros.index(True)]
ex_mat = ex_mat.drop(columns=[c], axis=1)
# ex_mat = ex_mat.drop(ex_mat.tail(len_zeros).index)
# Force ex_mat to be square
ex_mat = ex_mat[: ex_mat.shape[1] - 1]
self.ex_mat = ex_mat
def get_exchange(self):
"""
Take Heisenberg Hamiltonian and corresponding energy for each row and
solve for the exchange parameters.
Returns:
ex_params (dict): Exchange parameter values (meV/atom).
"""
ex_mat = self.ex_mat
# Solve the matrix equation for J_ij values
E = ex_mat[["E"]]
j_names = [j for j in ex_mat.columns if j not in ["E"]]
# Only 1 NN interaction
if len(j_names) < 3:
# Estimate exchange by J ~ E_AFM - E_FM
j_avg = self.estimate_exchange()
ex_params = {"<J>": j_avg}
self.ex_params = ex_params
return ex_params
        # Solve the linear system when there is more than 1 NN interaction
H = ex_mat.loc[:, ex_mat.columns != "E"].values
H_inv = np.linalg.inv(H)
j_ij = np.dot(H_inv, E)
# Convert J_ij to meV
j_ij[1:] *= 1000 # J_ij in meV
j_ij = j_ij.tolist()
ex_params = {j_name: j[0] for j_name, j in zip(j_names, j_ij)}
self.ex_params = ex_params
return ex_params
def get_low_energy_orderings(self):
"""
Find lowest energy FM and AFM orderings to compute E_AFM - E_FM.
Returns:
fm_struct (Structure): fm structure with 'magmom' site property
afm_struct (Structure): afm structure with 'magmom' site property
fm_e (float): fm energy
afm_e (float): afm energy
"""
fm_struct, afm_struct = None, None
mag_min = np.inf
mag_max = 0.001
fm_e_min = 0
afm_e_min = 0
# epas = [e / len(s) for (e, s) in zip(self.energies, self.ordered_structures)]
for s, e in zip(self.ordered_structures, self.energies):
ordering = CollinearMagneticStructureAnalyzer(
s, threshold=0.0, make_primitive=False
).ordering
magmoms = s.site_properties["magmom"]
# Try to find matching orderings first
if ordering == Ordering.FM and e < fm_e_min:
fm_struct = s
mag_max = abs(sum(magmoms))
fm_e = e
fm_e_min = e
if ordering == Ordering.AFM and e < afm_e_min:
afm_struct = s
afm_e = e
mag_min = abs(sum(magmoms))
afm_e_min = e
# Brute force search for closest thing to FM and AFM
if not fm_struct or not afm_struct:
for s, e in zip(self.ordered_structures, self.energies):
magmoms = s.site_properties["magmom"]
if abs(sum(magmoms)) > mag_max: # FM ground state
fm_struct = s
fm_e = e
mag_max = abs(sum(magmoms))
# AFM ground state
if abs(sum(magmoms)) < mag_min:
afm_struct = s
afm_e = e
mag_min = abs(sum(magmoms))
afm_e_min = e
elif abs(sum(magmoms)) == 0 and mag_min == 0:
if e < afm_e_min:
afm_struct = s
afm_e = e
afm_e_min = e
# Convert to magnetic structures with 'magmom' site property
fm_struct = CollinearMagneticStructureAnalyzer(
fm_struct, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
afm_struct = CollinearMagneticStructureAnalyzer(
afm_struct, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
return fm_struct, afm_struct, fm_e, afm_e
def estimate_exchange(self, fm_struct=None, afm_struct=None, fm_e=None, afm_e=None):
"""
Estimate <J> for a structure based on low energy FM and AFM orderings.
Args:
fm_struct (Structure): fm structure with 'magmom' site property
afm_struct (Structure): afm structure with 'magmom' site property
fm_e (float): fm energy/atom
afm_e (float): afm energy/atom
Returns:
j_avg (float): Average exchange parameter (meV/atom)
"""
# Get low energy orderings if not supplied
if any(arg is None for arg in [fm_struct, afm_struct, fm_e, afm_e]):
fm_struct, afm_struct, fm_e, afm_e = self.get_low_energy_orderings()
magmoms = fm_struct.site_properties["magmom"]
# Normalize energies by number of magnetic ions
# fm_e = fm_e / len(magmoms)
# afm_e = afm_e / len(afm_magmoms)
m_avg = np.mean([np.sqrt(m ** 2) for m in magmoms])
        # If m_avg for FM config is < 1 we won't get sensible results.
if m_avg < 1:
iamthedanger = """
Local magnetic moments are small (< 1 muB / atom). The
exchange parameters may be wrong, but <J> and the mean
field critical temperature estimate may be OK.
"""
logging.warning(iamthedanger)
delta_e = afm_e - fm_e # J > 0 -> FM
j_avg = delta_e / (m_avg ** 2) # eV / magnetic ion
j_avg *= 1000 # meV / ion
return j_avg
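    # Worked example with made-up numbers (illustrative only): if the AFM
    # ordering lies 45 meV per magnetic ion above the FM ordering
    # (delta_e = 0.045 eV) and the FM moments average 3 mu_B, then
    # <J> = 0.045 / 3**2 = 0.005 eV = 5 meV per magnetic ion, and the positive
    # sign indicates ferromagnetic coupling.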
def get_mft_temperature(self, j_avg):
"""
Crude mean field estimate of critical temperature based on <J> for
one sublattice, or solving the coupled equations for a multisublattice
material.
Args:
            j_avg (float): Average exchange parameter (meV/atom)
Returns:
mft_t (float): Critical temperature (K)
"""
num_sublattices = len(self.unique_site_ids)
k_boltzmann = 0.0861733 # meV/K
# Only 1 magnetic sublattice
if num_sublattices == 1:
mft_t = 2 * abs(j_avg) / 3 / k_boltzmann
else: # multiple magnetic sublattices
omega = np.zeros((num_sublattices, num_sublattices))
ex_params = self.ex_params
ex_params = {k: v for (k, v) in ex_params.items() if k != "E0"} # ignore E0
for k in ex_params:
# split into i, j unique site identifiers
sites = [elem for elem in k.split("-")]
sites = [int(num) for num in sites[:2]] # cut 'nn' identifier
i, j = sites[0], sites[1]
omega[i, j] += ex_params[k]
omega[j, i] += ex_params[k]
omega = omega * 2 / 3 / k_boltzmann
eigenvals, eigenvecs = np.linalg.eig(omega)
mft_t = max(eigenvals)
if mft_t > 1500: # Not sensible!
stayoutofmyterritory = """
This mean field estimate is too high! Probably
the true low energy orderings were not given as inputs.
"""
logging.warning(stayoutofmyterritory)
return mft_t
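    # Worked example for the single-sublattice branch (illustrative numbers):
    # with <J> = 5 meV and k_B = 0.0861733 meV/K,
    # T_c(MFT) = 2 * |J| / (3 * k_B) = 10 / 0.2585 ~ 39 K.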
def get_interaction_graph(self, filename=None):
"""
Get a StructureGraph with edges and weights that correspond to exchange
interactions and J_ij values, respectively.
Args:
filename (str): if not None, save interaction graph to filename.
Returns:
igraph (StructureGraph): Exchange interaction graph.
"""
structure = self.ordered_structures[0]
sgraph = self.sgraphs[0]
igraph = StructureGraph.with_empty_graph(
structure, edge_weight_name="exchange_constant", edge_weight_units="meV"
)
if "<J>" in self.ex_params: # Only <J> is available
warning_msg = """
Only <J> is available. The interaction graph will not tell
you much.
"""
logging.warning(warning_msg)
# J_ij exchange interaction matrix
for i, node in enumerate(sgraph.graph.nodes):
connections = sgraph.get_connected_sites(i)
for c in connections:
jimage = c[1] # relative integer coordinates of atom j
j = c[2] # index of neighbor
dist = c[-1] # i <-> j distance
j_exc = self._get_j_exc(i, j, dist)
igraph.add_edge(
i, j, to_jimage=jimage, weight=j_exc, warn_duplicates=False
)
# Save to a json file if desired
if filename:
if filename.endswith(".json"):
dumpfn(igraph, filename)
else:
filename += ".json"
dumpfn(igraph, filename)
return igraph
def _get_j_exc(self, i, j, dist):
"""
Convenience method for looking up exchange parameter between two sites.
Args:
i (int): index of ith site
j (int): index of jth site
dist (float): distance (Angstrom) between sites
(10E-2 precision)
Returns:
j_exc (float): Exchange parameter in meV
"""
# Get unique site identifiers
for k in self.unique_site_ids.keys():
if i in k:
i_index = self.unique_site_ids[k]
if j in k:
j_index = self.unique_site_ids[k]
order = ""
# Determine order of interaction
if abs(dist - self.dists["nn"]) <= self.tol:
order = "-nn"
elif abs(dist - self.dists["nnn"]) <= self.tol:
order = "-nnn"
elif abs(dist - self.dists["nnnn"]) <= self.tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in self.ex_params:
j_exc = self.ex_params[j_ij]
elif j_ji in self.ex_params:
j_exc = self.ex_params[j_ji]
else:
j_exc = 0
# Check if only averaged NN <J> values are available
if "<J>" in self.ex_params and order == "-nn":
j_exc = self.ex_params["<J>"]
return j_exc
def get_heisenberg_model(self):
"""Save results of mapping to a HeisenbergModel object.
Returns:
hmodel (HeisenbergModel): MSONable object.
"""
# Original formula unit with nonmagnetic ions
hm_formula = str(self.ordered_structures_[0].composition.reduced_formula)
hm_structures = self.ordered_structures
hm_energies = self.energies
hm_cutoff = self.cutoff
hm_tol = self.tol
hm_sgraphs = self.sgraphs
hm_usi = self.unique_site_ids
hm_wids = self.wyckoff_ids
hm_nni = self.nn_interactions
hm_d = self.dists
# Exchange matrix DataFrame in json format
hm_em = self.ex_mat.to_json()
hm_ep = self.get_exchange()
hm_javg = self.estimate_exchange()
hm_igraph = self.get_interaction_graph()
hmodel = HeisenbergModel(
hm_formula,
hm_structures,
hm_energies,
hm_cutoff,
hm_tol,
hm_sgraphs,
hm_usi,
hm_wids,
hm_nni,
hm_d,
hm_em,
hm_ep,
hm_javg,
hm_igraph,
)
return hmodel
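# Illustrative sketch (not part of the pymatgen API) of the mapping performed
# by HeisenbergMapper: for a 1D chain with a single NN coupling, the FM and AFM
# orderings give two equations in (E0, J) that can be solved directly.  All
# numbers below are made up.
def _example_heisenberg_fit():
    import numpy as np
    s = 2.0                     # assumed |magmom| per site
    n_nn = 2                    # two nearest neighbours per site in a chain
    # Per-site convention used here: E = E0 - (1/2) * J * sum_nn S_i . S_j
    fm_row = [1.0, -0.5 * n_nn * (+s * s)]    # all spins parallel
    afm_row = [1.0, -0.5 * n_nn * (-s * s)]   # alternating spins
    H = np.array([fm_row, afm_row])
    E = np.array([-1.020, -1.000])            # toy energies per site (eV)
    E0, J = np.linalg.solve(H, E)
    return E0, J * 1000                       # J = 2.5 meV > 0 -> FM favoured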
class HeisenbergScreener:
"""
Class to clean and screen magnetic orderings.
"""
def __init__(self, structures, energies, screen=False):
"""
This class pre-processes magnetic orderings and energies for
HeisenbergMapper. It prioritizes low-energy orderings with large and
localized magnetic moments.
Args:
structures (list): Structure objects with magnetic moments.
energies (list): Energies/atom of magnetic orderings.
screen (bool): Try to screen out high energy and low-spin configurations.
Attributes:
screened_structures (list): Sorted structures.
screened_energies (list): Sorted energies.
"""
# Cleanup
structures, energies = self._do_cleanup(structures, energies)
n_structures = len(structures)
# If there are more than 2 structures, we want to perform a
# screening to prioritize well-behaved orderings
if screen and n_structures > 2:
structures, energies = self._do_screen(structures, energies)
self.screened_structures = structures
self.screened_energies = energies
@staticmethod
def _do_cleanup(structures, energies):
"""Sanitize input structures and energies.
Takes magnetic structures and performs the following operations
- Erases nonmagnetic ions and gives all ions ['magmom'] site prop
- Converts total energies -> energy / magnetic ion
- Checks for duplicate/degenerate orderings
- Sorts by energy
Args:
structures (list): Structure objects with magmoms.
energies (list): Corresponding energies.
Returns:
ordered_structures (list): Sanitized structures.
ordered_energies (list): Sorted energies.
"""
# Get only magnetic ions & give all structures site_properties['magmom']
# zero threshold so that magnetic ions with small moments
# are preserved
ordered_structures = [
CollinearMagneticStructureAnalyzer(
s, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
for s in structures
]
# Convert to energies / magnetic ion
energies = [e / len(s) for (e, s) in zip(energies, ordered_structures)]
# Check for duplicate / degenerate states (sometimes different initial
# configs relax to the same state)
remove_list = []
for i, e in enumerate(energies):
e_tol = 6 # 10^-6 eV/atom tol on energies
e = round(e, e_tol)
if i not in remove_list:
for i_check, e_check in enumerate(energies):
e_check = round(e_check, e_tol)
if i != i_check and i_check not in remove_list and e == e_check:
remove_list.append(i_check)
# Also discard structures with small |magmoms| < 0.1 uB
# xx - get rid of these or just bury them in the list?
# for i, s in enumerate(ordered_structures):
# magmoms = s.site_properties['magmom']
# if i not in remove_list:
# if any(abs(m) < 0.1 for m in magmoms):
# remove_list.append(i)
# Remove duplicates
if len(remove_list):
ordered_structures = [
s for i, s in enumerate(ordered_structures) if i not in remove_list
]
energies = [e for i, e in enumerate(energies) if i not in remove_list]
# Sort by energy if not already sorted
ordered_structures = [
s for _, s in sorted(zip(energies, ordered_structures), reverse=False)
]
ordered_energies = sorted(energies, reverse=False)
return ordered_structures, ordered_energies
@staticmethod
def _do_screen(structures, energies):
"""Screen and sort magnetic orderings based on some criteria.
        Prioritize low-energy orderings and large, localized magmoms. _do_cleanup should be run first to sanitize inputs.
Args:
structures (list): At least three structure objects.
energies (list): Energies.
Returns:
screened_structures (list): Sorted structures.
screened_energies (list): Sorted energies.
"""
magmoms = [s.site_properties["magmom"] for s in structures]
n_below_1ub = [len([m for m in ms if abs(m) < 1]) for ms in magmoms]
df = pd.DataFrame(
{
"structure": structures,
"energy": energies,
"magmoms": magmoms,
"n_below_1ub": n_below_1ub,
}
)
# keep the ground and first excited state fixed to capture the
# low-energy spectrum
index = list(df.index)[2:]
df_high_energy = df.iloc[2:]
# Prioritize structures with fewer magmoms < 1 uB
df_high_energy = df_high_energy.sort_values(by="n_below_1ub")
index = [0, 1] + list(df_high_energy.index)
# sort
df = df.reindex(index)
screened_structures = list(df["structure"].values)
screened_energies = list(df["energy"].values)
return screened_structures, screened_energies
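# Illustrative sketch of the screening sort above with toy values (no real
# structures involved): the two lowest-energy orderings stay pinned and the
# remaining ones are reordered so configurations with fewer small (<1 mu_B)
# moments come first.
def _example_screen_sort():
    import pandas as pd
    df = pd.DataFrame({
        "label": ["gs", "1st", "A", "B", "C"],
        "energy": [-1.02, -1.01, -0.99, -0.98, -0.97],
        "n_below_1ub": [0, 0, 3, 0, 1],
    })
    df_high_energy = df.iloc[2:].sort_values(by="n_below_1ub")
    index = [0, 1] + list(df_high_energy.index)
    return list(df.reindex(index)["label"])   # ['gs', '1st', 'B', 'C', 'A']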
class HeisenbergModel(MSONable):
"""
Store a Heisenberg model fit to low-energy magnetic orderings.
Intended to be generated by HeisenbergMapper.get_heisenberg_model().
"""
def __init__(
self,
formula=None,
structures=None,
energies=None,
cutoff=None,
tol=None,
sgraphs=None,
unique_site_ids=None,
wyckoff_ids=None,
nn_interactions=None,
dists=None,
ex_mat=None,
ex_params=None,
javg=None,
igraph=None,
):
"""
Args:
formula (str): Reduced formula of compound.
structures (list): Structure objects with magmoms.
energies (list): Energies of each relaxed magnetic structure.
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
tol (float): Tolerance (in Angstrom) on nearest neighbor distances being equal.
sgraphs (list): StructureGraph objects.
unique_site_ids (dict): Maps each site to its unique numerical
identifier.
wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
position.
            nn_interactions (dict): {i: j} pairs of NN interactions
between unique sites.
dists (dict): NN, NNN, and NNNN interaction distances
ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
graph.
ex_params (dict): Exchange parameter values (meV/atom).
javg (float): <J> exchange param (meV/atom).
igraph (StructureGraph): Exchange interaction graph.
"""
self.formula = formula
self.structures = structures
self.energies = energies
self.cutoff = cutoff
self.tol = tol
self.sgraphs = sgraphs
self.unique_site_ids = unique_site_ids
self.wyckoff_ids = wyckoff_ids
self.nn_interactions = nn_interactions
self.dists = dists
self.ex_mat = ex_mat
self.ex_params = ex_params
self.javg = javg
self.igraph = igraph
def as_dict(self):
"""
Because some dicts have tuple keys, some sanitization is required for json compatibility.
"""
d = {}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["@version"] = __version__
d["formula"] = self.formula
d["structures"] = [s.as_dict() for s in self.structures]
d["energies"] = self.energies
d["cutoff"] = self.cutoff
d["tol"] = self.tol
d["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs]
d["dists"] = self.dists
d["ex_params"] = self.ex_params
d["javg"] = self.javg
d["igraph"] = self.igraph.as_dict()
# Sanitize tuple & int keys
d["ex_mat"] = jsanitize(self.ex_mat)
d["nn_interactions"] = jsanitize(self.nn_interactions)
d["unique_site_ids"] = jsanitize(self.unique_site_ids)
d["wyckoff_ids"] = jsanitize(self.wyckoff_ids)
return d
@classmethod
def from_dict(cls, d):
"""Create a HeisenbergModel from a dict."""
# Reconstitute the site ids
usids = {}
wids = {}
nnis = {}
for k, v in d["nn_interactions"].items():
nn_dict = {}
for k1, v1 in v.items():
key = literal_eval(k1)
nn_dict[key] = v1
nnis[k] = nn_dict
for k, v in d["unique_site_ids"].items():
key = literal_eval(k)
if type(key) == int:
usids[tuple([key])] = v
elif type(key) == tuple:
usids[key] = v
for k, v in d["wyckoff_ids"].items():
key = literal_eval(k)
wids[key] = v
# Reconstitute the structure and graph objects
structures = []
sgraphs = []
for v in d["structures"]:
structures.append(Structure.from_dict(v))
for v in d["sgraphs"]:
sgraphs.append(StructureGraph.from_dict(v))
# Interaction graph
igraph = StructureGraph.from_dict(d["igraph"])
# Reconstitute the exchange matrix DataFrame
try:
ex_mat = eval(d["ex_mat"])
ex_mat = pd.DataFrame.from_dict(ex_mat)
except SyntaxError: # if ex_mat is empty
ex_mat = pd.DataFrame(columns=["E", "E0"])
hmodel = HeisenbergModel(
formula=d["formula"],
structures=structures,
energies=d["energies"],
cutoff=d["cutoff"],
tol=d["tol"],
sgraphs=sgraphs,
unique_site_ids=usids,
wyckoff_ids=wids,
nn_interactions=nnis,
dists=d["dists"],
ex_mat=ex_mat,
ex_params=d["ex_params"],
javg=d["javg"],
igraph=igraph,
)
return hmodel
def _get_j_exc(self, i, j, dist):
"""
Convenience method for looking up exchange parameter between two sites.
Args:
i (int): index of ith site
j (int): index of jth site
dist (float): distance (Angstrom) between sites +- tol
Returns:
j_exc (float): Exchange parameter in meV
"""
# Get unique site identifiers
for k in self.unique_site_ids.keys():
if i in k:
i_index = self.unique_site_ids[k]
if j in k:
j_index = self.unique_site_ids[k]
order = ""
# Determine order of interaction
if abs(dist - self.dists["nn"]) <= self.tol:
order = "-nn"
elif abs(dist - self.dists["nnn"]) <= self.tol:
order = "-nnn"
elif abs(dist - self.dists["nnnn"]) <= self.tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in self.ex_params:
j_exc = self.ex_params[j_ij]
elif j_ji in self.ex_params:
j_exc = self.ex_params[j_ji]
else:
j_exc = 0
# Check if only averaged NN <J> values are available
if "<J>" in self.ex_params and order == "-nn":
j_exc = self.ex_params["<J>"]
return j_exc
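# Illustrative usage sketch (not used elsewhere in this module): `hmodel` is
# assumed to be a fitted HeisenbergModel, e.g. the output of
# HeisenbergMapper.get_heisenberg_model(), and site indices 0 and 1 are
# assumed to exist in its unique_site_ids.
def _example_heisenberg_model_roundtrip(hmodel):
    """Serialize a HeisenbergModel to a dict, rebuild it, and look up the
    exchange parameter (meV) between sites 0 and 1 at the stored
    nearest-neighbor distance."""
    d = hmodel.as_dict()
    rebuilt = HeisenbergModel.from_dict(d)
    return rebuilt._get_j_exc(0, 1, rebuilt.dists["nn"])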
| mit |
mjgrav2001/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
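# Example invocation (a sketch; any subset of the ESTIMATORS keys above can be
# passed, and timings/error rates will vary by machine):
#   python bench_mnist.py --classifiers RandomForest CART --n-jobs 2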
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
tawsifkhan/trafficjamto | baselines/maxbaseline.py | 1 | 1100 | #!/usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import pyproj
import time
import argparse
import os
def groupData(data, minpts=5):
grouped = data.groupby(['binlat','binlon','bintime','binheading'])
grouped = pd.DataFrame(grouped['speed'].agg([len, np.mean, np.std]))
    # keep only bins with more than `minpts` observations
    grouped = grouped.loc[grouped.len > minpts]
return grouped
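# Illustrative sketch of the expected input: each CSV is assumed to already
# contain binned columns ('binlat', 'binlon', 'bintime', 'binheading') plus a
# 'speed' column; groupData then reports count/mean/std of speed per bin, e.g.
#   binned = pd.read_csv("some_binned_trips.csv")   # hypothetical file name
#   stats = groupData(binned, minpts=5)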
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs="+", help="Raw input csv files")
parser.add_argument('-v','--maxspeed', type=float, default=15.)
parser.add_argument('-o','--output', type=str, default="grouped.csv", help="Output file name")
args = parser.parse_args()
proj = pyproj.Proj("+proj=utm +zone=17N, +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
allbinned = []
for filename in args.infile:
data = pd.read_csv(filename)
grouped = data.groupby(['binlat','binlon','binheading'])['bintime'].agg(np.max)
basename = os.path.splitext(filename)[0]
grouped.to_csv(basename+"-max-over-time.csv")
| gpl-2.0 |
PrashntS/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
googleinterns/new-semantic-parsing | cli/preprocess.py | 1 | 8782 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Preprocess text data and save binary Dataset objects along with tokenizers to a directory."""
import os
import sys
import logging
import argparse
from functools import reduce
from os.path import join as path_join
from random import shuffle
import toml
import torch
import pandas as pd
import transformers
import new_semantic_parsing as nsp
from new_semantic_parsing import utils
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger(os.path.basename(__file__))
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def parse_args(args=None):
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument("--data", required=True,
help="path to TOP dataset directory")
parser.add_argument("--text-tokenizer", required=True,
help="pratrained tokenizer name or path to a saved tokenizer")
parser.add_argument("--output-dir", required=True,
help="directory to save preprocessed data")
parser.add_argument("--seed", default=34)
# splitting parameters
parser.add_argument("--split-class", default=None,
help="remove --split-ratio of the class from the training dataset and make a finetune_data; "
"do not perform split by default")
parser.add_argument("--split-amount", default=None, type=float,
help="0 < --split-amount < 1, amount of data to remove from the training dataset")
# fmt: on
args = parser.parse_args(args)
if args.split_amount is not None:
if not 0.0 < args.split_amount < 1.0:
raise ValueError("--split-amount should be between 0. and 1.")
if args.split_class is not None:
if args.split_amount is None:
raise ValueError("--split-amount should be specified if --split-class is provided")
return args
def train_finetune_split(train_data, schema_vocab, split_amount, split_class=None):
"""Split train_data into train and finetune parts with ratio split_amount.
    Train part should contain all classes from the original train_data.
    If split_class is provided, split across examples containing this class.
    I.e. split_amount of the data with split_class goes to the finetune set.
Args:
train_data: pd.DataFrame
schema_vocab: set of tokens
split_amount: float
split_class: if provided, split across the specified class
"""
# Get a small set of examples that contains all classes from schema_vocab
required_example_ids = utils.get_required_example_ids(schema_vocab, train_data)
ids = set(range(len(train_data)))
if split_class is not None:
ids = set(train_data.index[train_data.schema.str.contains(split_class)])
logger.info(f"Moving {100 * split_amount}% of {split_class} into a finetuning subset")
_take = int(len(ids) * split_amount)
_leave = len(ids) - _take
logger.info(
f"Take {_take} class examples to finetuning set and leave {_leave} class examles in"
" training set."
)
if len(ids) == 0:
raise RuntimeError(f"Cannot find specified class {split_class} in the data.")
split_ids = list(ids - required_example_ids)
take = int(len(split_ids) * split_amount)
leave = len(train_data) - take
assert take > 0
logger.info(f"Taking {take} examples and leaving {leave} examples")
shuffle(split_ids)
subset_ids = split_ids[:take]
subset_ids_set = set(subset_ids)
all_ids = set(range(len(train_data)))
assert len(subset_ids_set.intersection(required_example_ids)) == 0
train_data_ids = list(all_ids - subset_ids_set | required_example_ids)
finetune_data = train_data.iloc[subset_ids]
train_data = train_data.iloc[train_data_ids]
return train_data, finetune_data
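# Illustrative sketch (hypothetical intent slug): move 10% of the examples
# whose schema contains IN:GET_WEATHER into a finetuning subset, while keeping
# at least one example of every schema token in the training part.
#
#   train_part, finetune_part = train_finetune_split(
#       train_data, schema_vocab, split_amount=0.1, split_class="IN:GET_WEATHER"
#   )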
def main(args):
utils.set_seed(args.seed)
if os.path.exists(args.output_dir):
raise ValueError(f"output_dir {args.output_dir} already exists")
# File structure:
# that's text\tthat 's text\t[IN:UNSUPPORTED that 's text]
train_path = path_join(path_join(args.data, "train.tsv"))
train_data = pd.read_table(train_path, names=["text", "tokens", "schema"])
full_train_data_size = len(train_data) # used to check the train/finetune split
finetune_data, finetune_path = None, None
schema_vocab = reduce(set.union, map(utils.get_vocab_top_schema, train_data.schema))
if args.split_amount is not None:
# finetune part is not used by train script, but used by retrain script
logger.info("Splitting the training dataset")
train_data, finetune_data = train_finetune_split(
train_data, schema_vocab, args.split_amount, args.split_class
)
os.makedirs(args.output_dir)
finetune_path = path_join(args.output_dir, "finetune.tsv")
logger.info(f"Saving the finetune_data to {finetune_path}")
finetune_data.to_csv(finetune_path, sep="\t", index=False, header=False)
train_path = path_join(args.output_dir, "train.tsv")
logger.info(f"Saving the modified training set to {train_path}")
train_data.to_csv(train_path, sep="\t", index=False, header=False)
logger.info("Getting schema vocabulary")
if args.split_amount is not None:
finetune_schema_vocab = reduce(
set.union, map(utils.get_vocab_top_schema, finetune_data.schema)
)
vocab_delta = finetune_schema_vocab - schema_vocab
if len(vocab_delta) > 0:
logger.warning(
f"Finetuning subset contains vocabulary elements not from the training subset"
)
logger.warning(f"New elements: {', '.join(vocab_delta)}")
logger.info(f"Schema vocabulary size: {len(schema_vocab)}")
logger.info("Building tokenizers")
text_tokenizer = transformers.AutoTokenizer.from_pretrained(args.text_tokenizer, use_fast=True)
schema_tokenizer = nsp.TopSchemaTokenizer(schema_vocab, text_tokenizer)
logger.info("Tokenizing train dataset")
train_dataset = nsp.data.make_dataset(train_path, schema_tokenizer)
logger.info("Tokenizing validation and test datasets")
valid_dataset = nsp.data.make_dataset(path_join(args.data, "eval.tsv"), schema_tokenizer)
test_dataset = nsp.data.make_dataset(path_join(args.data, "test.tsv"), schema_tokenizer)
finetune_dataset = None
if args.split_amount is not None:
logger.info("Tokenizing finetune set")
finetune_dataset = nsp.data.make_dataset(finetune_path, schema_tokenizer)
logger.info(f"Original train set size: {full_train_data_size}")
logger.info(f"Reduced train set size: {len(train_dataset)}")
logger.info(f"Finetune set size: {len(finetune_dataset)}")
train_finetune_data_size = len(train_dataset) + len(finetune_dataset)
if train_finetune_data_size != full_train_data_size:
raise RuntimeError(f"{train_finetune_data_size} != {full_train_data_size}")
logger.info(f"Saving config, data and tokenizer to {args.output_dir}")
os.makedirs(args.output_dir, exist_ok=True)
with open(path_join(args.output_dir, "args.toml"), "w") as f:
args_dict = {"version": nsp.SAVE_FORMAT_VERSION, **vars(args)}
toml.dump(args_dict, f)
# text tokenizer is saved along with schema_tokenizer
model_type = None
if not os.path.exists(args.text_tokenizer):
model_type = utils.get_model_type(args.text_tokenizer)
schema_tokenizer.save(path_join(args.output_dir, "tokenizer"), encoder_model_type=model_type)
data_state = {
"train_dataset": train_dataset,
"valid_dataset": valid_dataset,
"test_dataset": test_dataset,
"finetune_dataset": finetune_dataset,
"version": nsp.SAVE_FORMAT_VERSION,
}
torch.save(data_state, path_join(args.output_dir, "data.pkl"))
if __name__ == "__main__":
args = parse_args()
main(args)
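# Example invocation (paths, tokenizer name and class label are placeholders):
#   python cli/preprocess.py \
#       --data path/to/top_dataset \
#       --text-tokenizer bert-base-cased \
#       --output-dir data-bin/top \
#       --split-amount 0.1 \
#       --split-class IN:GET_WEATHER
# The output directory will contain args.toml, data.pkl, the saved tokenizer
# and, when --split-amount is given, finetune.tsv plus the reduced train.tsv.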
| apache-2.0 |
cmunk/protwis | angles/views.py | 1 | 11779 | from django.conf import settings
from django.contrib.postgres.aggregates import ArrayAgg
from django.shortcuts import render
from django.db.models import Count, Avg, Min, Max, Q
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView, View
import contactnetwork.pdb as pdb
from structure.models import Structure
from residue.models import Residue
from angles.models import ResidueAngle as Angle
import Bio.PDB
import copy
import io
import math
import cmath
from collections import OrderedDict
import numpy as np
from sklearn.decomposition import PCA
from numpy.core.umath_tests import inner1d
import freesasa
import scipy.stats as stats
def angleAnalysis(request):
"""
Show angle analysis page
"""
return render(request, 'angles/angleanalysis.html')
def angleAnalyses(request):
"""
Show angle analyses page
"""
return render(request, 'angles/angleanalyses.html')
def structureCheck(request):
"""
Show structure annotation check page
"""
return render(request, 'angles/structurecheck.html')
def get_angles(request):
data = {'error': 0}
# angle names for custom averaging
angles = ['avg_aangle', 'avg_bangle', 'avg_outer', 'avg_phi', 'avg_psi', 'avg_theta', 'avg_tau']
# Request selection
try:
#if True:
pdbs = request.GET.getlist('pdbs[]')
pdbs = set([pdb.upper() for pdb in pdbs])
print(pdbs)
pdbs2 = request.GET.getlist('pdbs2[]')
pdbs2 = set([pdb.upper() for pdb in pdbs2])
print(pdbs2)
# Grab PDB data
if len(pdbs)==1 and len(pdbs2)==0:
pdbs = list(pdbs)
query = Angle.objects.filter(structure__pdb_code__index=pdbs[0]).prefetch_related("residue__generic_number").order_by('residue__generic_number__label')
# Prep data
data['data'] = [[q.residue.generic_number.label,q.residue.sequence_number, q.a_angle, q.b_angle, q.outer_angle, q.hse, q.sasa, q.rsa, q.phi, q.psi, q.theta, q.tau, q.core_distance, q.ss_dssp, q.ss_stride ] for q in query]
data['headers'] = [{"title" : "Value"}]
else: # always a grouping or a comparison
query = Angle.objects.filter(structure__pdb_code__index__in=pdbs).prefetch_related("residue__generic_number") \
.values("residue__generic_number__label") \
.order_by('residue__generic_number__label') \
.annotate(min_aangle = Min('a_angle'), avg_aangle=ArrayAgg('a_angle'), max_aangle = Max('a_angle'), \
min_bangle = Min('b_angle'), avg_bangle=ArrayAgg('b_angle'), max_bangle = Max('b_angle'), \
min_outer = Min('outer_angle'), avg_outer=ArrayAgg('outer_angle'), max_outer = Max('outer_angle'), \
min_hse = Min('hse'), avg_hse=Avg('hse'), max_hse = Max('hse'), \
min_sasa = Min('sasa'), avg_sasa=Avg('sasa'), max_sasa = Max('sasa'), \
min_rsa = Min('rsa'), avg_rsa=Avg('rsa'), max_rsa = Max('rsa'), \
min_phi = Min('phi'), avg_phi=ArrayAgg('phi'), max_phi = Max('phi'), \
min_psi = Min('psi'), avg_psi=ArrayAgg('psi'), max_psi = Max('psi'), \
min_theta = Min('theta'), avg_theta=ArrayAgg('theta'), max_theta = Max('theta'), \
min_tau = Min('tau'), avg_tau=ArrayAgg('tau'), max_tau = Max('tau'), \
min_distance = Min('core_distance'), avg_distance=Avg('core_distance'), max_distance = Max('core_distance'))
# Process angle aggregates to angle averages
for q in query:
for angle in angles:
q[angle] = [ qa for qa in q[angle] if qa != None]
if angle in q and len(q[angle]) > 1:
# Sensible average for multiple angles (circular statistics: https://rosettacode.org/wiki/Averages/Mean_angle)
q[angle] = math.degrees(cmath.phase(sum(cmath.rect(1, math.radians(float(d))) for d in q[angle])/len(q[angle])))
elif len(q[angle]) == 1:
q[angle] = q[angle][0]
# Prep data
data['data'] = [ [q["residue__generic_number__label"], " ", \
[q["min_aangle"], q["avg_aangle"], q["max_aangle"]], \
[q["min_bangle"], q["avg_bangle"], q["max_bangle"]], \
[q["min_outer"], q["avg_outer"], q["max_outer"]], \
[q["min_hse"], q["avg_hse"], q["max_hse"]], \
[q["min_sasa"], q["avg_sasa"], q["max_sasa"]], \
[q["min_rsa"], q["avg_rsa"], q["max_rsa"]], \
[q["min_phi"], q["avg_phi"], q["max_phi"]], \
[q["min_psi"], q["avg_psi"], q["max_psi"]], \
[q["min_theta"], q["avg_theta"], q["max_theta"]], \
[q["min_tau"], q["avg_tau"], q["max_tau"]], \
[q["min_distance"], q["avg_distance"], q["max_distance"]], \
] for q in query]
print(data)
if len(pdbs2)==0:
data['headers'] = [{"title" : "Group<br/>Min"},{"title" : "Group<br/>Avg"},{"title" : "Group<br/>Max"}]
else:
data['headers'] = [{"title" : "Group 1<br/>Min"},{"title" : "Group 1<br/>Avg"},{"title" : "Group 1<br/>Max"}]
# Select PDBs from same Class + same state
data['headers2'] = [{"title" : "Group 2<br/>Min"},{"title" : "Group 2<br/>Avg"},{"title" : "Group 2<br/>Max"}]
if len(pdbs2)==0:
# select structure(s)
structures = Structure.objects.filter(pdb_code__index__in=pdbs) \
.select_related('protein_conformation__protein__family','protein_conformation__state')
# select PDBs
states = set( structure.protein_conformation.state.slug for structure in structures )
classes = set( structure.protein_conformation.protein.family.slug[:3] for structure in structures )
query = Q()
for classStart in classes:
query = query | Q(protein_conformation__protein__family__slug__startswith=classStart)
set2 = Structure.objects.filter(protein_conformation__state__slug__in=states).filter(query).values_list('pdb_code__index')
pdbs2 = [ x[0] for x in set2 ]
data['headers2'] = [{"title" : "Class<br/>Min"},{"title" : "Class<br/>Avg"},{"title" : "Class<br/>Max"}]
query = Angle.objects.filter(structure__pdb_code__index__in=pdbs2).prefetch_related("residue__generic_number") \
.values("residue__generic_number__label") \
.annotate(min_aangle = Min('a_angle'), avg_aangle=ArrayAgg('a_angle'), max_aangle = Max('a_angle'), \
min_bangle = Min('b_angle'), avg_bangle=ArrayAgg('b_angle'), max_bangle = Max('b_angle'), \
min_outer = Min('outer_angle'), avg_outer=ArrayAgg('outer_angle'), max_outer = Max('outer_angle'), \
min_hse = Min('hse'), avg_hse=Avg('hse'), max_hse = Max('hse'), \
min_sasa = Min('sasa'), avg_sasa=Avg('sasa'), max_sasa = Max('sasa'), \
min_rsa = Min('rsa'), avg_rsa=Avg('rsa'), max_rsa = Max('rsa'), \
min_phi = Min('phi'), avg_phi=ArrayAgg('phi'), max_phi = Max('phi'), \
min_psi = Min('psi'), avg_psi=ArrayAgg('psi'), max_psi = Max('psi'), \
min_theta = Min('theta'), avg_theta=ArrayAgg('theta'), max_theta = Max('theta'), \
min_tau = Min('tau'), avg_tau=ArrayAgg('tau'), max_tau = Max('tau'), \
min_distance = Min('core_distance'), avg_distance=Avg('core_distance'), max_distance = Max('core_distance'))
# Process angle aggregates to angle averages
for q in query:
for angle in angles:
                q[angle] = [ qa for qa in q[angle] if qa != None]
if angle in q and len(q[angle]) > 1:
# Sensible average for multiple angles (circular statistics: https://rosettacode.org/wiki/Averages/Mean_angle)
q[angle] = math.degrees(cmath.phase(sum(cmath.rect(1, math.radians(float(d))) for d in q[angle])/len(q[angle])))
elif len(q[angle]) == 1:
q[angle] = q[angle][0]
# Prep data
data['data2'] = { q["residue__generic_number__label"]: [q["residue__generic_number__label"], " ", \
[q["min_aangle"], q["avg_aangle"], q["max_aangle"]], \
[q["min_bangle"], q["avg_bangle"], q["max_bangle"]], \
[q["min_outer"], q["avg_outer"], q["max_outer"]], \
[q["min_hse"], q["avg_hse"], q["max_hse"]], \
[q["min_sasa"], q["avg_sasa"], q["max_sasa"]], \
[q["min_rsa"], q["avg_rsa"], q["max_rsa"]], \
[q["min_phi"], q["avg_phi"], q["max_phi"]], \
[q["min_psi"], q["avg_psi"], q["max_psi"]], \
[q["min_theta"], q["avg_theta"], q["max_theta"]], \
[q["min_tau"], q["avg_tau"], q["max_tau"]], \
[q["min_distance"], q["avg_distance"], q["max_distance"]], \
] for q in query}
except IndexError:
#else:
data['error'] = 1
data['errorMessage'] = "No PDB(s) selection provided"
return JsonResponse(data)
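# Illustrative helper (a sketch; not referenced by the views above). It spells
# out the same circular ("mean angle") statistic that get_angles applies
# inline: averaging angles via unit vectors, so that e.g. 350 and 10 degrees
# average to roughly 0 degrees rather than 180.
def _circular_mean_degrees(angles):
    """Return the circular mean of an iterable of angles given in degrees."""
    vectors = [cmath.rect(1, math.radians(float(d))) for d in angles]
    return math.degrees(cmath.phase(sum(vectors) / len(vectors)))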
def ServePDB(request, pdbname):
# query = Angle.objects.filter(residue__protein_segment__slug__in=['TM1','TM2','TM3','TM4','TM5','TM6','TM7','H8']).prefetch_related("residue__generic_number") \
# .aggregate(total=Count('ss_stride'), \
# total2=Count('ss_dssp'))
# print(query)
#
# query = Angle.objects.filter(residue__protein_segment__slug__in=['TM1','TM2','TM3','TM4','TM5','TM6','TM7','H8']).prefetch_related("residue__generic_number") \
# .values("ss_stride") \
# .annotate(total=Count('ss_stride')) \
# .order_by('ss_stride')
# print(query)
#
# query = Angle.objects.filter(residue__protein_segment__slug__in=['TM1','TM2','TM3','TM4','TM5','TM6','TM7','H8']).prefetch_related("residue__generic_number") \
# .values("ss_dssp") \
# .annotate(total=Count('ss_dssp')) \
# .order_by('ss_dssp')
# print(query)
structure=Structure.objects.filter(pdb_code__index=pdbname.upper())
if structure.exists():
structure=structure.get()
else:
quit()
if structure.pdb_data is None:
quit()
only_gns = list(structure.protein_conformation.residue_set.exclude(generic_number=None).values_list('protein_segment__slug','sequence_number','generic_number__label').all())
only_gn = []
gn_map = []
segments = {}
for gn in only_gns:
only_gn.append(gn[1])
gn_map.append(gn[2])
if gn[0] not in segments:
segments[gn[0]] = []
segments[gn[0]].append(gn[1])
data = {}
data['pdb'] = structure.pdb_data.pdb
data['only_gn'] = only_gn
data['gn_map'] = gn_map
data['segments'] = segments
data['chain'] = structure.preferred_chain
return JsonResponse(data)
| apache-2.0 |
KellyChan/python-examples | python/crawlers/crawler/yelp/api_yelp.py | 3 | 1528 | """
Author: Kelly Chan
Date: July 12 2014
"""
import time
import rauth
import pandas
import simplejson as json
def defineParams(latitude, longitude):
params = {}
params["term"] = "restaurants"
params["ll"] = "{},{}".format(str(latitude), str(longitude))
params["radius_filter"] = "2000"
params["limit"] = "10"
return params
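# Illustrative sketch: defineParams(39.98, -82.98) returns
#   {"term": "restaurants", "ll": "39.98,-82.98",
#    "radius_filter": "2000", "limit": "10"}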
def getData(params):
# setting up personal Yelp account
consumer_key = "XXX"
consumer_secret = "XXXX"
token = "XXX"
token_secret = "XXX"
session = rauth.OAuth1Session(consumer_key = consumer_key,
consumer_secret = consumer_secret,
access_token = token,
access_token_secret = token_secret)
request = session.get("http://api.yelp.com/v2/search", params=params)
# transforming the data in JSON format
data = request.json()
session.close()
return data
def main():
locations = [(39.98,-82.98),(42.24,-83.61),(41.33,-89.13)]
apiData = []
for latitude, longitude in locations:
params = defineParams(latitude, longitude)
apiData.append(getData(params))
time.sleep(1.0)
    #print(len(apiData))
    for key in apiData[0].keys():
        print(key)
    for record in apiData:
        print(record["businesses"])
        print(record['total'])
        print(record['region'])
print(json.dumps(apiData, sort_keys=True, indent=4 * ' '))
if __name__ == '__main__':
main()
| mit |
Caranarq/01_Dmine | 00_Generales/ProxyParametros.py | 1 | 3407 | # -*- coding: utf-8 -*-
"""
Started on Thu Sep 7 16:46:39 2017
@author: carlos.arana
"""
'''
Description:
Script to create parameter proxies.
Proxies created from the list of parameters available as of September 11, 2017.
'''
import pandas as pd
import os
import sys
import random
import numpy as np
module_path = r'D:\PCCS\01_Dmine\00_Parametros'
if module_path not in sys.path:
sys.path.append(module_path)
from AsignarDimension.AsignarDimension import AsignarDimension
RutaEntrada = r'D:\PCCS\ListaParametros.xlsx'
lista = pd.read_excel(RutaEntrada, sheetname='lista', index_col = 0,
dtype={'ClaveDimension' : str})
# Load the integrator workbook
RutaIntegrador = r'D:\PCCS\01_Dmine\00_Parametros\CatalogoParametros.xlsx'
HojaIndice = pd.read_excel(RutaIntegrador, sheetname='INDICE', index_col = 0)
HojaParametros = pd.read_excel(RutaIntegrador, sheetname='PARAMETROS', index_col = 0)
HojaIntegridad = pd.read_excel(RutaIntegrador, sheetname='INTEGRIDAD', index_col = 0)
# Functions for creating proxies
def HazProxy(ID_PARAMETRO):
indice = HojaParametros.index
ProxyValores = random.sample(range(100,1000), 135)
Proxy = pd.Series(data=ProxyValores, index=indice, name=ID_PARAMETRO)
return Proxy
def HazProxyIntegridad(ID_PARAMETRO):
indice = HojaParametros.index
ProxyValores = np.random.sample(135).tolist()
Proxy = pd.Series(data=ProxyValores, index=indice, name=ID_PARAMETRO)
return Proxy.round(decimals=3)
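# Illustrative sketch ('P0101' is a hypothetical parameter ID): both helpers
# return a pandas Series of 135 values indexed like HojaParametros and named
# after the parameter:
#   HazProxy('P0101')            # distinct random integers drawn from [100, 1000)
#   HazProxyIntegridad('P0101')  # random floats in [0, 1), rounded to 3 decimals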
EsProxy = '(Mineria de datos de parametro pendiente)\n\nProxy creado con fines de coordinacion'
for indecs, row in lista.iterrows():
DirBase = r'D:\PCCS\01_Dmine'
SubDir = '{}_{}'.format(row['ClaveDimension'], AsignarDimension(row['ClaveDimension'])['directorio'])
DirDestino = r'{}\{}\{}'.format(DirBase, SubDir, indecs)
RepoBase = 'https://github.com/INECC-PCCS/01_Dmine/tree/master/'
SubRepo = '{}{}/{}'.format(RepoBase, SubDir, indecs)
    if not os.path.isdir(DirDestino): # Create the directory if it does not exist
os.makedirs(DirDestino)
readmefile = '{}\README.md'.format(DirDestino)
Glosa = '# [PROXY] {} - {}\n\n{}'.format(indecs, row['Nombre Parametro'],EsProxy)
with open(readmefile, 'w') as README:
README.write(Glosa)
    if indecs not in HojaIndice.index: # Create a proxy index entry for a single parameter
DatosIndice = pd.DataFrame(index=[indecs],
data={
'NOM_PARAMETRO': row['Nombre Parametro'],
'ARCHIVO_LOCAL': 'proxy',
'URL_MINERIA': SubRepo
})
        HojaIndice.loc[DatosIndice.iloc[0].name] = DatosIndice.iloc[0] # Write the proxy into the index sheet
else: continue
    if indecs not in HojaParametros: # Create a proxy series for the parameter
HojaParametros[indecs] = HazProxy(indecs)
HojaIntegridad[indecs] = HazProxyIntegridad(indecs)
else: continue
print('Proxy creado para {} - {}\nCarpeta: {}\nRepo: {}'.format(indecs, row['Nombre Parametro'], DirDestino, SubRepo))
# Save the integrator workbook
writer = pd.ExcelWriter(RutaIntegrador)  # write back to the integrator workbook, not the input list
HojaIndice.to_excel(writer, sheet_name='INDICE')
HojaParametros.to_excel(writer, sheet_name='PARAMETROS')
HojaIntegridad.to_excel(writer, sheet_name='INTEGRIDAD')
writer.save()
| gpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/legend.py | 2 | 37156 | """
The legend module defines the Legend class, which is responsible for
drawing legends associated with axes and/or figures.
The Legend class can be considered as a container of legend handles
and legend texts. Creation of corresponding legend handles from the
plot elements in the axes or figures (e.g., lines, patches, etc.) are
specified by the handler map, which defines the mapping between the
plot elements and the legend handlers to be used (the default legend
handlers are defined in the :mod:`~matplotlib.legend_handler` module). Note
that not all kinds of artist are supported by the legend yet (See
:ref:`plotting-guide-legend` for more information).
"""
from __future__ import division, print_function
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist, allow_rasterization
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection, \
CircleCollection, PathCollection
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.transforms import BboxTransformTo, BboxTransformFrom
from matplotlib.offsetbox import HPacker, VPacker, TextArea, DrawingArea
from matplotlib.offsetbox import DraggableOffsetBox
from matplotlib.container import ErrorbarContainer, BarContainer, StemContainer
from matplotlib import MatplotlibDeprecationWarning as mplDeprecation
import legend_handler
class DraggableLegend(DraggableOffsetBox):
def __init__(self, legend, use_blit=False, update="loc"):
"""
update : If "loc", update *loc* parameter of
legend upon finalizing. If "bbox", update
*bbox_to_anchor* parameter.
"""
self.legend = legend
if update in ["loc", "bbox"]:
self._update = update
else:
raise ValueError("update parameter '%s' is not supported." %
update)
DraggableOffsetBox.__init__(self, legend, legend._legend_box,
use_blit=use_blit)
def artist_picker(self, legend, evt):
return self.legend.contains(evt)
def finalize_offset(self):
loc_in_canvas = self.get_loc_in_canvas()
if self._update == "loc":
self._update_loc(loc_in_canvas)
elif self._update == "bbox":
self._update_bbox_to_anchor(loc_in_canvas)
else:
raise RuntimeError("update parameter '%s' is not supported." %
                               self._update)
def _update_loc(self, loc_in_canvas):
bbox = self.legend.get_bbox_to_anchor()
# if bbox has zero width or height, the transformation is
        # ill-defined. Fall back to the default bbox_to_anchor.
if bbox.width == 0 or bbox.height == 0:
self.legend.set_bbox_to_anchor(None)
bbox = self.legend.get_bbox_to_anchor()
_bbox_transform = BboxTransformFrom(bbox)
self.legend._loc = tuple(
_bbox_transform.transform_point(loc_in_canvas))
def _update_bbox_to_anchor(self, loc_in_canvas):
tr = self.legend.axes.transAxes
loc_in_bbox = tr.transform_point(loc_in_canvas)
self.legend.set_bbox_to_anchor(loc_in_bbox)
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of the normalized coordinate values with
    respect to its parent.
"""
codes = {'best': 0, # only implemented for axis legends
'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc=None,
numpoints=None, # the number of points in the legend line
markerscale=None, # the relative size of legend markers
# vs. original
scatterpoints=3, # TODO: may be an rcParam
scatteryoffsets=None,
prop=None, # properties for the legend texts
fontsize=None, # keyword to set font size directly
# the following dimensions are in axes coords
pad=None, # deprecated; use borderpad
labelsep=None, # deprecated; use labelspacing
handlelen=None, # deprecated; use handlelength
handletextsep=None, # deprecated; use handletextpad
axespad=None, # deprecated; use borderaxespad
# spacing & pad defined as a fraction of the font-size
borderpad=None, # the whitespace inside the legend border
labelspacing=None, # the vertical space between the legend
# entries
handlelength=None, # the length of the legend handles
handleheight=None, # the height of the legend handles
handletextpad=None, # the pad between the legend handle
# and text
borderaxespad=None, # the pad between the axes and legend
# border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns.
# None, "expand"
fancybox=None, # True use a fancy box, false use a rounded
# box, none use rc
shadow=None,
title=None, # set a title for the legend
bbox_to_anchor=None, # bbox that the legend will be anchored.
bbox_transform=None, # transform for the bbox
frameon=None, # draw frame
handler_map=None,
):
"""
- *parent*: the artist that contains the legend
- *handles*: a list of artists (lines, patches) to be added to the
legend
- *labels*: a list of strings to label the legend
Optional keyword arguments:
================ ====================================================
Keyword Description
================ ====================================================
loc a location code
prop the font property
fontsize the font size (used only if prop is not specified)
markerscale the relative size of legend markers vs. original
numpoints the number of points in the legend for line
scatterpoints the number of points in the legend for scatter plot
scatteryoffsets a list of yoffsets for scatter symbols in legend
frameon if True, draw a frame around the legend.
If None, use rc
fancybox if True, draw a frame with a round fancybox.
If None, use rc
shadow if True, draw a shadow behind legend
ncol number of columns
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handleheight the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
title the legend title
bbox_to_anchor the bbox that the legend will be anchored.
bbox_transform the transform for the bbox. transAxes if None.
================ ====================================================
The pad and spacing parameters are measured in font-size units. E.g.,
a fontsize of 10 points and a handlelength=5 implies a handlelength of
50 points. Values from rcParams will be used if None.
Users can specify any arbitrary location for the legend using the
*bbox_to_anchor* keyword argument. bbox_to_anchor can be an instance
of BboxBase(or its derivatives) or a tuple of 2 or 4 floats.
See :meth:`set_bbox_to_anchor` for more detail.
The legend location can be specified by setting *loc* with a tuple of
2 floats, which is interpreted as the lower-left corner of the legend
in the normalized axes coordinate.
"""
# local import only to avoid circularity
from matplotlib.axes import Axes
from matplotlib.figure import Figure
Artist.__init__(self)
if prop is None:
if fontsize is not None:
self.prop = FontProperties(size=fontsize)
else:
self.prop = FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop = FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self._fontsize = self.prop.get_size_in_points()
propnames = ["numpoints", "markerscale", "shadow", "columnspacing",
"scatterpoints", "handleheight"]
self.texts = []
self.legendHandles = []
self._legend_title_box = None
self._handler_map = handler_map
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend." + name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad": "borderpad",
"labelsep": "labelspacing",
"handlelen": "handlelength",
"handletextsep": "handletextpad",
"axespad": "borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height) / self._fontsize
for k, v in deprecated_kwds.iteritems():
# use deprecated value if not None and if their newer
            # counterpart is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
mplDeprecation)
setattr(self, v, localdict[k] * axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend." + v])
else:
setattr(self, v, localdict[v])
del localdict
handles = list(handles)
if len(handles) < 2:
ncol = 1
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be > 0; it was %d" % numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3. / 8., 4. / 8., 2.5 / 8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.scatterpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets,
reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent, Axes):
self.isaxes = True
self.set_axes(parent)
self.set_figure(parent.figure)
elif isinstance(parent, Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0, 'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back '
'on "best"; valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.iterkeys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back '
'on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.iterkeys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not '
'implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._mode = mode
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor=rcParams["axes.facecolor"],
edgecolor=rcParams["axes.edgecolor"],
mutation_scale=self._fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox:
self.legendPatch.set_boxstyle("round", pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square", pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = frameon
if frameon is None:
self._drawFrame = rcParams["legend.frameon"]
# init with null renderer
self._init_legend_box(handles, labels)
self._loc = loc
self.set_title(title)
self._last_fontsize_points = self._fontsize
self._draggable = None
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
if self.isaxes:
a.set_axes(self.axes)
a.set_transform(self.get_transform())
def _set_loc(self, loc):
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
self._loc_real = loc
if loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
#def findoffset(width, height, xdescent, ydescent):
# return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(_findoffset)
self._loc_real = loc
def _get_loc(self):
return self._loc_real
_loc = property(_get_loc, _set_loc)
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Helper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox + xdescent, oy + ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Helper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc) == 2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.get_bbox_to_anchor()
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox,
self.get_bbox_to_anchor(),
renderer)
return x + xdescent, y + ydescent
@allow_rasterization
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible():
return
renderer.open_group('legend')
fontsize = renderer.points_to_pixels(self._fontsize)
        # if mode == "expand", set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2 * (self.borderaxespad + self.borderpad) * fontsize
self._legend_box.set_width(self.get_bbox_to_anchor().width - pad)
# update the location and size of the legend. This needs to
# be done in any case to clip the figure right.
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self._drawFrame:
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self._fontsize
else:
return renderer.points_to_pixels(self._fontsize)
# _default_handler_map defines the default mapping between plot
# elements and the legend handlers.
_default_handler_map = {
StemContainer: legend_handler.HandlerStem(),
ErrorbarContainer: legend_handler.HandlerErrorbar(),
Line2D: legend_handler.HandlerLine2D(),
Patch: legend_handler.HandlerPatch(),
LineCollection: legend_handler.HandlerLineCollection(),
RegularPolyCollection: legend_handler.HandlerRegularPolyCollection(),
CircleCollection: legend_handler.HandlerCircleCollection(),
BarContainer: legend_handler.HandlerPatch(
update_func=legend_handler.update_from_first_child),
tuple: legend_handler.HandlerTuple(),
PathCollection: legend_handler.HandlerPathCollection()
}
# (get|set|update)_default_handler_maps are public interfaces to
    # modify the default handler map.
@classmethod
def get_default_handler_map(cls):
"""
A class method that returns the default handler map.
"""
return cls._default_handler_map
@classmethod
def set_default_handler_map(cls, handler_map):
"""
A class method to set the default handler map.
"""
cls._default_handler_map = handler_map
@classmethod
def update_default_handler_map(cls, handler_map):
"""
A class method to update the default handler map.
"""
cls._default_handler_map.update(handler_map)
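    # Illustrative sketch (hypothetical user code; ``MyArtist`` is a placeholder
    # class): registering a handler once makes every future legend aware of it,
    # e.g.
    #
    #   Legend.update_default_handler_map({MyArtist: legend_handler.HandlerPatch()})
    #
    # after which legends will know how to draw ``MyArtist`` entries.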
def get_legend_handler_map(self):
"""
return the handler map.
"""
default_handler_map = self.get_default_handler_map()
if self._handler_map:
hm = default_handler_map.copy()
hm.update(self._handler_map)
return hm
else:
return default_handler_map
@staticmethod
def get_legend_handler(legend_handler_map, orig_handle):
"""
return a legend handler from *legend_handler_map* that
corresponds to *orig_handler*.
*legend_handler_map* should be a dictionary object (that is
returned by the get_legend_handler_map method).
It first checks if the *orig_handle* itself is a key in the
        *legend_handler_map* and returns the associated value.
Otherwise, it checks for each of the classes in its
method-resolution-order. If no matching key is found, it
returns None.
"""
legend_handler_keys = legend_handler_map.keys()
if orig_handle in legend_handler_keys:
handler = legend_handler_map[orig_handle]
else:
for handle_type in type(orig_handle).mro():
if handle_type in legend_handler_map:
handler = legend_handler_map[handle_type]
break
else:
handler = None
return handler
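    # Illustrative sketch: because the lookup above walks the class MRO, a
    # handler registered for a base class also covers its subclasses.
    # Hypothetical example (``my_step_line`` being an instance of a Line2D
    # subclass):
    #
    #   hmap = legend.get_legend_handler_map()
    #   handler = Legend.get_legend_handler(hmap, my_step_line)
    #   # -> falls back to the HandlerLine2D entry registered for Line2D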
def _init_legend_box(self, handles, labels):
"""
Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self._fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
handle_list = [] # the list of text instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
descent = 0.35 * self._approx_text_height() * (self.handleheight - 0.7)
        # 0.35 and 0.7 are just heuristic numbers; this may need to be improved
height = self._approx_text_height() * self.handleheight - descent
# each handle needs to be drawn inside a box of (x, y, w, h) =
# (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (e.g., Collections), you need to
        # manually set its transform to self.get_transform().
legend_handler_map = self.get_legend_handler_map()
for orig_handle, lab in zip(handles, labels):
handler = self.get_legend_handler(legend_handler_map, orig_handle)
if handler is None:
warnings.warn(
"Legend does not support %s\nUse proxy artist "
"instead.\n\n"
"http://matplotlib.sourceforge.net/users/legend_guide.html#using-proxy-artist\n" %
(str(orig_handle),))
handle_list.append(None)
continue
textbox = TextArea(lab, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handlebox = DrawingArea(width=self.handlelength * fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handler(self, orig_handle,
#xdescent, ydescent, width, height,
fontsize,
handlebox)
handle_list.append(handle)
handleboxes.append(handlebox)
if len(handleboxes) > 0:
# We calculate number of rows in each column. The first
# (num_largecol) columns will have (nrows+1) rows, and remaining
# (num_smallcol) columns will have (nrows) rows.
ncol = min(self._ncol, len(handleboxes))
nrows, num_largecol = divmod(len(handleboxes), ncol)
num_smallcol = ncol - num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0,
num_largecol * (nrows + 1),
(nrows + 1)),
[nrows + 1] * num_largecol)
smallcol = safezip(range(num_largecol * (nrows + 1),
len(handleboxes),
nrows),
[nrows] * num_smallcol)
else:
largecol, smallcol = [], []
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol + smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad * fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0 + di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing * fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing * fontsize
self._legend_handle_box = HPacker(pad=0,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_title_box = TextArea("")
self._legend_box = VPacker(pad=self.borderpad * fontsize,
sep=self.labelspacing * fontsize,
align="center",
children=[self._legend_title_box,
self._legend_handle_box])
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
Returns a two long list.
First element is a list of (x, y) vertices (in
display-coordinates) covered by all the lines and line
collections, in the legend's handles.
Second element is a list of bounding boxes for all the patches in
the legend's handles.
"""
# should always hold because function is only called internally
assert self.isaxes
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self.set_frame_on(b)
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
children.extend(self.get_lines())
children.extend(self.get_patches())
children.extend(self.get_texts())
children.append(self.get_frame())
if self._legend_title_box:
children.append(self.get_title())
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch',
[h for h in self.legendHandles
if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def set_title(self, title, prop=None):
"""
set the legend title. Fontproperties can be optionally set
with *prop* parameter.
"""
self._legend_title_box._text.set_text(title)
if prop is not None:
if isinstance(prop, dict):
prop = FontProperties(**prop)
self._legend_title_box._text.set_fontproperties(prop)
if title:
self._legend_title_box.set_visible(True)
else:
self._legend_title_box.set_visible(False)
def get_title(self):
'return Text instance for the legend title'
return self._legend_title_box._text
def get_window_extent(self, *args, **kwargs):
        'return the extent of the legend'
return self.legendPatch.get_window_extent(*args, **kwargs)
def get_frame_on(self):
"""
Get whether the legend box patch is drawn
"""
return self._drawFrame
def set_frame_on(self, b):
"""
Set whether the legend box patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._drawFrame = b
def get_bbox_to_anchor(self):
"""
return the bbox that the legend will be anchored
"""
if self._bbox_to_anchor is None:
return self.parent.bbox
else:
return self._bbox_to_anchor
def set_bbox_to_anchor(self, bbox, transform=None):
"""
set the bbox that the legend will be anchored.
*bbox* can be a BboxBase instance, a tuple of [left, bottom,
width, height] in the given transform (normalized axes
coordinate if None), or a tuple of [left, bottom] where the
width and height will be assumed to be zero.
"""
if bbox is None:
self._bbox_to_anchor = None
return
elif isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
if transform is None:
transform = BboxTransformTo(self.parent.bbox)
self._bbox_to_anchor = TransformedBbox(self._bbox_to_anchor,
transform)
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding
"best".
- bbox: bbox to be placed, in display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1, 11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs = {UR: "NE",
UL: "NW",
LL: "SW",
LR: "SE",
R: "E",
CL: "W",
CR: "E",
LC: "S",
UC: "N",
C: "C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self._fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
# should always hold because function is only called internally
assert self.isaxes
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.get_bbox_to_anchor(),
renderer)
for x
in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
# NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
def contains(self, event):
return self.legendPatch.contains(event)
def draggable(self, state=None, use_blit=False, update="loc"):
"""
Set the draggable state -- if state is
* None : toggle the current state
* True : turn draggable on
* False : turn draggable off
If draggable is on, you can drag the legend on the canvas with
the mouse. The DraggableLegend helper instance is returned if
draggable is on.
The update parameter controls which parameter of the legend changes
when dragged. If update is "loc", the *loc* parameter of the legend
is changed. If "bbox", the *bbox_to_anchor* parameter is changed.
"""
is_draggable = self._draggable is not None
# if state is None we'll toggle
if state is None:
state = not is_draggable
if state:
if self._draggable is None:
self._draggable = DraggableLegend(self,
use_blit,
update=update)
else:
if self._draggable is not None:
self._draggable.disconnect()
self._draggable = None
return self._draggable
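# Illustrative sketch, not part of the original matplotlib module: a typical
# use of the public API that ends up in the Legend methods above. The plotted
# data, the label and the helper name are assumptions made purely for
# demonstration; the function is defined here but never called.
def _example_anchored_draggable_legend():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4], label="example line")
    # loc / bbox_to_anchor are resolved through set_bbox_to_anchor and
    # _get_anchored_bbox above; (1.02, 1.0) anchors the box just outside
    # the axes, in axes-fraction coordinates.
    leg = ax.legend(loc="upper left", bbox_to_anchor=(1.02, 1.0))
    # Enable interactive dragging (see Legend.draggable above).
    leg.draggable(True)
    return leg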
| mit |
cwu2011/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
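# Illustrative addition, not part of the original example: a fitted PCA object
# also reports how much variance each component captures, which is often
# useful when choosing n_components.
print(pca.explained_variance_ratio_)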
| bsd-3-clause |
Djabbz/scikit-learn | sklearn/linear_model/logistic.py | 1 | 65251 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
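# Illustrative sketch, not part of scikit-learn: a tiny worked example of
# _intercept_dot. The trailing entry of ``w`` is treated as the intercept and
# ``yz`` is y * (X.dot(coef) + intercept). All numbers are arbitrary.
def _example_intercept_dot():
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w = np.array([0.5, -0.25, 2.0])  # two coefficients followed by the intercept
    coef, intercept, yz = _intercept_dot(w, X, y)
    # coef == [0.5, -0.25], intercept == 2.0, yz == [2.0, -2.5]
    return coef, intercept, yz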
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
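# Illustrative sketch, not part of scikit-learn: sanity-check the analytic
# gradient returned by _logistic_loss_and_grad against scipy's
# finite-difference approximation. Data are random and labels are +/- 1,
# as the helpers above expect.
def _example_check_logistic_grad():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.choice([-1., 1.], size=20)
    w = rng.randn(5)
    err = optimize.check_grad(
        lambda w_: _logistic_loss(w_, X, y, 1.0),
        lambda w_: _logistic_loss_and_grad(w_, X, y, 1.0)[1],
        w)
    return err  # should be close to zero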
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
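# Illustrative sketch, not part of scikit-learn: check the Hessian-vector
# product returned by _logistic_grad_hess against a centred finite difference
# of the analytic gradient, (g(w + eps*s) - g(w - eps*s)) / (2*eps).
def _example_check_logistic_hessp():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = rng.choice([-1., 1.], size=30)
    w = rng.randn(4)
    s = rng.randn(4)
    eps = 1e-6
    _, Hs = _logistic_grad_hess(w, X, y, 1.0)
    g_plus = _logistic_loss_and_grad(w + eps * s, X, y, 1.0)[1]
    g_minus = _logistic_loss_and_grad(w - eps * s, X, y, 1.0)[1]
    fd = (g_plus - g_minus) / (2 * eps)
    # the two vectors should agree up to finite-difference error
    return np.max(np.abs(Hs(s) - fd))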
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
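# Illustrative sketch, not part of scikit-learn: finite-difference check of
# _multinomial_loss_grad on a small three-class problem without intercept.
# The one-hot matrix Y stands in for the output of LabelBinarizer.
def _example_check_multinomial_grad():
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 30, 4, 3
    X = rng.randn(n_samples, n_features)
    y = rng.randint(n_classes, size=n_samples)
    Y = np.eye(n_classes)[y]
    sw = np.ones(n_samples)
    w = rng.randn(n_classes * n_features)
    err = optimize.check_grad(
        lambda w_: _multinomial_loss(w_, X, Y, 1.0, sw)[0],
        lambda w_: _multinomial_loss_grad(w_, X, Y, 1.0, sw)[1],
        w)
    return err  # should be close to zero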
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual, sample_weight):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver in ['liblinear', 'sag']:
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if solver == 'liblinear' and sample_weight is not None:
raise ValueError("Solver %s does not support "
"sample weights." % solver)
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual, sample_weight)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
warm_start_sag = {'coef': w0}
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, 'log', 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum,
warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
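# Illustrative sketch, not part of scikit-learn: fit a binary problem along a
# small grid of C values with the lbfgs solver. The synthetic data below are
# an assumption made only for demonstration.
def _example_logistic_regression_path():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = (X[:, 0] - X[:, 1] + 0.3 * rng.randn(50) > 0).astype(np.int32)
    coefs, Cs, n_iter = logistic_regression_path(
        X, y, Cs=[0.01, 0.1, 1.0, 10.0], fit_intercept=True, solver='lbfgs')
    # one (n_features + 1)-vector per C; the last entry is the intercept
    return np.asarray(coefs), Cs, n_iter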
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual, sample_weight)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
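# Illustrative sketch, not part of scikit-learn: score one train/test split
# along a small grid of C values, the way LogisticRegressionCV does per fold.
# The data and the split below are assumptions for demonstration only.
def _example_log_reg_scoring_path():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 3)
    y = (X[:, 0] + 0.5 * rng.randn(40) > 0).astype(np.int32)
    train, test = np.arange(30), np.arange(30, 40)
    coefs, Cs, scores, n_iter = _log_reg_scoring_path(
        X, y, train, test, pos_class=1, Cs=[0.1, 1.0, 10.0],
        fit_intercept=True, solver='lbfgs')
    return Cs, scores  # one accuracy score per C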
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
max_iter : int
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual, sample_weight)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
self.n_iter_ = np.array([n_iter_])
return self
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e. calculate the probability
of each class assuming it to be positive using the logistic function,
and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
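# Illustrative sketch, not part of scikit-learn: the usual estimator workflow
# with this class -- fit, then inspect probabilities. The iris dataset is used
# only for demonstration.
def _example_logistic_regression():
    from sklearn import datasets
    iris = datasets.load_iris()
    clf = LogisticRegression(C=1.0, solver='lbfgs', multi_class='multinomial',
                             max_iter=200)
    clf.fit(iris.data, iris.target)
    # coef_ has one row per class; predict_proba rows sum to one
    return clf.coef_.shape, clf.predict_proba(iris.data[:5])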
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using the liblinear, newton-cg, sag
or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path, i.e. guess the initial coefficients of the
present fit to be the coefficients obtained after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores obtained by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for 'lbfgs' and
'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when the parameter fit_intercept is set to True
and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual, sample_weight)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
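                # Illustration (editorial note, hypothetical values): with 3
                # folds, best_indices might be [2, 2, 1], so w averages
                # coefs_paths[0][2], coefs_paths[1][2] and coefs_paths[2][1].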
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause |
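
# Editor's usage sketch for the LogisticRegressionCV.fit code above (not part
# of the scikit-learn source; the class and attributes follow the public API,
# the data and parameter values below are purely illustrative):
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegressionCV

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
clf = LogisticRegressionCV(Cs=10, cv=5, solver='lbfgs',
                           multi_class='ovr').fit(X, y)
print(clf.C_)       # one selected C per class (a single value when binary)
print(clf.scores_)  # dict: class label -> (n_folds, len(Cs)) score grid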
galfaroi/trading-with-python | lib/csvDatabase.py | 77 | 6045 | # -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import print_function, division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d" # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"
def fileName2date(fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
return dt.datetime.strptime(name.split('_')[1],dateFormat).date()
def parseDateTime(dateTimeStr):
return dt.datetime.strptime(dateTimeStr,dateTimeFormat)
def loadCsv(fName):
''' load DataFrame from csv file '''
with open(fName,'r') as f:
lines = f.readlines()
dates= []
header = [h.strip() for h in lines[0].strip().split(',')[1:]]
data = [[] for i in range(len(header))]
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(parseDateTime(fields[0]))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
return pd.DataFrame(data=dict(zip(header,data)),index=pd.Index(dates))
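# Example of the csv layout loadCsv expects (editorial illustration, values
# made up; the first header cell is skipped and timestamps must follow
# dateTimeFormat above):
#
#   datetime,open,high,low,close,wap
#   20120831 09:30:00,14.50,14.60,14.40,14.55,14.52
#   20120831 09:30:30,14.55,14.60,14.50,14.58,14.56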
class HistDataCsv(object):
'''class for working with historic database in .csv format'''
def __init__(self,symbol,dbDir,autoCreateDir=False):
self.symbol = symbol
self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))
if not os.path.exists(self.dbDir) and autoCreateDir:
            print('Creating data directory', self.dbDir)
os.mkdir(self.dbDir)
self.dates = []
for fName in os.listdir(self.dbDir):
self.dates.append(fileName2date(fName))
def saveData(self,date, df,lowerCaseColumns=True):
''' add data to database'''
if lowerCaseColumns: # this should provide consistency to column names. All lowercase
df.columns = [ c.lower() for c in df.columns]
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
dest = os.path.join(self.dbDir,s) # full path destination
        print('Saving data to:', dest)
df.to_csv(dest)
def loadDate(self,date):
''' load data '''
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
        # pd.DataFrame.from_csv was removed in modern pandas; read_csv with an
        # index column and date parsing is the equivalent call
        df = pd.read_csv(os.path.join(self.dbDir, s), index_col=0, parse_dates=True)
cols = [col.strip() for col in df.columns.tolist()]
df.columns = cols
#df = loadCsv(os.path.join(self.dbDir,s))
return df
def loadDates(self,dates):
        ''' load multiple dates, concatenating them into one DataFrame '''
tmp =[]
        print('Loading multiple dates for', self.symbol)
p = ProgressBar(len(dates))
for i,date in enumerate(dates):
tmp.append(self.loadDate(date))
p.animate(i+1)
        print('')
return pd.concat(tmp)
def createOHLC(self):
''' create ohlc from intraday data'''
ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])
for date in self.dates:
            print('Processing', date)
            try:
                df = self.loadDate(date)
                ohlc.loc[date, 'open'] = df['open'].iloc[0]
                ohlc.loc[date, 'high'] = df['wap'].max()
                ohlc.loc[date, 'low'] = df['wap'].min()
                ohlc.loc[date, 'close'] = df['close'].iloc[-1]
except Exception as e:
                print('Could not convert:', e)
return ohlc
def __repr__(self):
return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
class HistDatabase(object):
''' class working with multiple symbols at once '''
def __init__(self, dataDir):
# get symbols from directory names
symbols = []
for l in os.listdir(dataDir):
if os.path.isdir(os.path.join(dataDir,l)):
symbols.append(l)
#build dataset
        self.csv = {}  # dict of HistDataCsv handlers
for symbol in symbols:
self.csv[symbol] = HistDataCsv(symbol,dataDir)
def loadDates(self,dates=None):
'''
get data for all symbols as wide panel
provide a dates list. If no dates list is provided, common dates are used.
'''
if dates is None: dates=self.commonDates
tmp = {}
        for k, v in self.csv.items():
            tmp[k] = v.loadDates(dates)
        # note: pd.WidePanel is gone from modern pandas; a dict of DataFrames
        # or pd.concat with a MultiIndex would be the current replacement
        return pd.WidePanel(tmp)
def toHDF(self,dataFile,dates=None):
        ''' write wide panel data to an HDFStore file '''
if dates is None: dates=self.commonDates
store = pd.HDFStore(dataFile)
wp = self.loadDates(dates)
store['data'] = wp
store.close()
@property
def commonDates(self):
''' return dates common for all symbols '''
        t = [v.dates for v in self.csv.values()]  # get all dates in a list
d = list(set(t[0]).intersection(*t[1:]))
return sorted(d)
def __repr__(self):
s = '-----Hist CSV Database-----\n'
        for k, v in self.csv.items():
s+= (str(v)+'\n')
return s
#--------------------
if __name__=='__main__':
dbDir =os.path.normpath('D:/data/30sec')
vxx = HistDataCsv('VXX',dbDir)
spy = HistDataCsv('SPY',dbDir)
#
date = dt.date(2012,8,31)
    print(date)
#
pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
    print(pair.tail())
| bsd-3-clause |
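
# Editor's usage sketch for csvDatabase.py above (illustrative only: the module
# name, data directory and HDF path are assumptions, and loadDates/toHDF still
# rely on pd.WidePanel, i.e. the older pandas this module targets):
from csvDatabase import HistDatabase

db = HistDatabase('D:/data/30sec')     # expects one sub-directory per symbol
print(db)                              # summary of the symbols found
ohlc = db.csv['SPY'].createOHLC()      # daily OHLC from the intraday bars
db.toHDF('D:/data/30sec.h5')           # dump the common dates to an HDF5 store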