repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Barmaley-exe/scikit-learn | sklearn/utils/tests/test_extmath.py | 3 | 16594 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic, logistic_sigmoid
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
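    # Why 6 must win: randint(6) only produces values 0-5, so those values
    # appear only in the last five columns with weights < 1 each (total < 5),
    # while the value 6 fills the first five columns with boosted weights in
    # [1, 2) (total >= 5); its score is therefore exactly w[:, :5].sum(1).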
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
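    # logsumexp computes log(sum_i(exp(x_i))) in a numerically stable way, so
    # exp(logsumexp(log(x))) should recover x.sum(); the 2-d checks below do
    # the same thing element-wise along each axis.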
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
"""Check that extmath.randomized_svd is consistent with linalg.svd"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
    # important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
    # the iterated power method helps to get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
    # let us try again without a low-rank component: just regularly but slowly
    # decreasing singular values, so the matrix has no clear low-rank structure
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
"""Check that transposing the design matrix has limit impact"""
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
"""Check that svd_flip works in both situations, and reconstructs input."""
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
"""Check if cartesian product delivers the right results"""
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
"""Check correctness and robustness of logistic sigmoid implementation"""
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
with warnings.catch_warnings(record=True):
assert_array_almost_equal(logistic_sigmoid(x), naive_logistic(x))
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
"""Check fast dot blas wrapper function"""
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
"""Test Youngs and Cramer incremental variance formulas."""
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
"""Test that degrees of freedom parameter for calculations are correct."""
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(batch, incremental_means,
incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
scizen9/kpy | GRB/GBM_exp.py | 1 | 1780 | """
GBM_exp.py
Author: Ginny Cunningham
Date: December 11, 2017
For a given magnitude and time of a GRB, calculate the expected magnitude at a later time assuming a power law decay.
Usage: python GBM_exp.py [Initial_Magnitude] [Age of Burst]
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
print " "
#####Inputs
inputs = sys.argv
m0 = float(inputs[1]) #Initial magnitude (read from email alert)
t0 = float(inputs[2]) #Time since burst (read from email alert) [s]
#delta_t = float(inputs[3]) #Expected delta_t from t0 to time of observation with SEDm [s]
delta_t = 600. #Use a placeholder value of 10 minutes for now [s]
t_obs = t0+delta_t #Expected time of observation [s]
#print m0,t0,t_obs
#####Power law index
gamma = 1.
#####Calculate expected magnitude
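# Assumed model (consistent with the formulas below, not taken from any
# GBM/UVOT documentation): the afterglow flux decays as a power law,
# F(t) = F(t0)*(t/t0)**(-gamma), and since m = -2.5*log10(F) + const the
# magnitude fades as m(t) = m0 + 2.5*gamma*log10(t/t0).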
tend = 10*t_obs #End of observation run [s]
t = np.linspace(t0, tend, 1000) #Range of times to observe over [s]
m = 2.5*gamma*np.log10(t/t0)+m0 #final magnitude
m_exp = 2.5*gamma*np.log10((t_obs)/t0)+m0 #Expected magnitude at t_obs=t0+delta_t
print "Expected Magnitude %s s after initial UVOT observations: %.2f" %(delta_t, m_exp)
#####Examples of exposure times for various magnitudes
if 10 < m_exp <= 11:
exptime = 120
elif 11 < m_exp <= 12:
exptime = 240
elif 12 < m_exp <= 13:
exptime = 360
elif 13 < m_exp <= 14:
exptime = 500
else:
exptime = -1
print "Exposure Time not Within Expected Range."
print "Recommended Exposure time: %s seconds" %exptime
#####Plotting
plt.semilogx(t,m)
plt.gca().invert_yaxis()
p1 = plt.scatter(t0, m0, c='g', label="Initial Magnitude")
p2 = plt.scatter(t0+delta_t, m_exp, c='r', label="Expected Magnitude at Observation")
plt.legend()
plt.xlabel("Time since Trigger [s]")
plt.ylabel("Magnitude")
plt.show()
print " "
| gpl-2.0 |
alcmrt/Machine-Learning | K Nearest Neighbor Classifier/k_nearest_neighbor_classifier.py | 1 | 6830 | """
K - Nearest Neighbor Classifier.
"""
import csv
import pandas as pd
import math
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score, classification_report, f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from collections import Counter
__author__ = "Alicem Murat ÖCAL"
##########################################################################
"""
Global variables
"""
# predict category of test data and get a list of predictions
predictions = []
# category/class of predictions
prediction_categories = []
# probabilities of predictions
prediction_probabilities = []
##########################################################################
# read data
data = pd.read_csv("data/train.csv", sep=",")
# split data set as x and y
y = data.iloc[:, 0]
X = data.iloc[:, 1:]
# normalize features by using mean normalization
for i in X:
X[i] = (X[i] - X[i].mean())/X[i].std()
# feature selection, get 200 best features applying by F-Test score.
X = SelectKBest(score_func=f_classif, k=200).fit_transform(X, y)
# split data as training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# convert pandas data frame to numpy.ndarray
y_train = y_train.values
y_test = y_test.values
def calculate_distance1(data1, data2):
"""
:param data1: a vector
:param data2: a vector
:return:
euclidean distance between data1 and data2
"""
# zip data1 and data2
zipped = zip(data1, data2)
# calculate distance between data1 and data2
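    # i.e. distance = sqrt(sum_i((data1[i] - data2[i])**2))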
distance = math.sqrt(sum([pow(a - b, 2) for (a, b) in zipped]))
return distance
def get_neighbors(X_train, test_sample, k):
"""
:param X_train: list of training samples
:param test_sample: a single test sample
:param k : number of neighbors
:return: training index list of k nearest neighbors of given test sample
"""
# empty distance list
distance_list = []
# training index list of k-nearest neighbors
neighbors = [] # this is not being used
# calculate distance between test_sample and all training_samples
for training_index in range(0, len(X_train)):
training_sample = X_train[training_index]
# calculate distance between test_sample and training_sample
distance = calculate_distance1(test_sample, training_sample)
# append training index and distance into distance_list
distance_list.append((training_index, distance))
# sort distance_list, from smallest distance to biggest
distance_list.sort(key=lambda tup: tup[1])
# get training index list of k-nearest neighbors
#neighbors = [x[0] for x in distance_list[0: k]]
# get class list of 5 nearest training neighbors
#class_list_of_nearest_neighbors = [y_train[i] for i in neighbors]
class_list_of_nearest_neighbors = [y_train[x[0]] for x in distance_list[0: k]]
# return class list of nearest neighbors
return class_list_of_nearest_neighbors
def detect_majority(class_list_of_nearest_neighbors):
"""
calculate probability and majority of a class
:param class_list_of_nearest_neighbors: list of classes of nearest neighbors
:return major category in classes_of_nearest_neighbors
"""
# count categories
count = Counter(class_list_of_nearest_neighbors)
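    # e.g. Counter([1, 0, 1]).most_common() == [(1, 2), (0, 1)], so
    # most_common()[0][0] below is the majority label among the neighbors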
# get probability of "class 1"
probability_of_class1 = 1/len(class_list_of_nearest_neighbors) * class_list_of_nearest_neighbors.count(1)
# return majority and probability of the major category
return count.most_common()[0][0], probability_of_class1
def write_file(file_path, data_matrix):
"""
write data to csv file
:param file_path: file path of data
:param data_matrix: the data that will be written
"""
# open file
with open(file_path, mode="w", newline='') as file:
writer = csv.writer(file)
writer.writerows(data_matrix)
def create_data_matrix():
"""
Create empty data_matrix to store estimated result probabilities of
test data and the real test results of test data.
"""
# create empty data_matrix to store estimated result probabilities of
# test data and the real results
row_number_of_data = len(X_test)
data_matrix = [[0.0] for j in range(row_number_of_data + 1)]
data_matrix[0] = ["real test categories", "estimated probabilities"]
# write real categories of test data and estimated class probabilities
# of test data into data_matrix
for j in range(1, row_number_of_data + 1):
data_matrix[j] = [y_test[j-1]] + [prediction_probabilities[j-1]]
# write results into a csv file.
write_file(file_path="data/results_of_knn.csv", data_matrix=data_matrix)
#########################################################################################
# list of predictions
predictions = []
# estimate categories of every sample in test data by using KNN Classifier.
for test_index in range(0, len(X_test)):
print('Classifying test instance number ' + str(test_index) + ":",)
# get class list of 3 nearest training neighbors
classes_of_nearest_neighbors = get_neighbors(X_train, X_test[test_index], 3)
# get major class of nearest neighbors and probability of "class 1"
major_class, category_1_probability = detect_majority(classes_of_nearest_neighbors)
# store predictions in predictions list for evaluate accuracy
predictions.append((major_class, category_1_probability))
# show estimations for every test sample
print("Probability of class 1=" + str(category_1_probability) + ", Predicted label=" + str(major_class) +
", Actual Label=" + str(y_test[test_index]))
print()
# category/class of predictions
prediction_categories = [x[0] for x in predictions]
# probabilities of predictions
prediction_probabilities = [x[1] for x in predictions]
"""
Create empty data_matrix to store estimated result probabilities of
test data and the real test results of test data.
"""
create_data_matrix()
# summarize performance of the classification
print('\nThe overall accuracy score of knn is: ' + str(accuracy_score(y_test, prediction_categories)))
print('\nThe overall f1 score of knn is: ' + str(f1_score(y_test, prediction_categories)))
print('\nThe overall roc_auc score of knn is: ' + str(roc_auc_score(y_test, prediction_categories)))
# create a classification report by using sklearn.
report = classification_report(y_test, prediction_categories, target_names=['0', '1'])
print('\n\nA detailed classification report: \n\n' + report)
| mit |
neuronalX/workshop_cellular_automaton | game_of_life_python.py | 1 | 2374 | # -----------------------------------------------------------------------------
# From Numpy to Python
# Copyright (2017) Nicolas P. Rougier - BSD license
# More information at https://github.com/rougier/numpy-book
# -----------------------------------------------------------------------------
def compute_neighbours(Z):
shape = len(Z), len(Z[0])
N = [[0, ]*(shape[0]) for i in range(shape[1])]
for x in range(1, shape[0]-1):
for y in range(1, shape[1]-1):
N[x][y] = Z[x-1][y-1]+Z[x][y-1]+Z[x+1][y-1] \
+ Z[x-1][y] +Z[x+1][y] \
+ Z[x-1][y+1]+Z[x][y+1]+Z[x+1][y+1]
return N
def iterate(Z):
shape = len(Z), len(Z[0])
N = compute_neighbours(Z)
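    # Conway's rules: a live cell with fewer than 2 or more than 3 live
    # neighbours dies; a dead cell with exactly 3 live neighbours is born;
    # every other cell is left unchanged.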
for x in range(1, shape[0]-1):
for y in range(1, shape[1]-1):
if Z[x][y] == 1 and (N[x][y] < 2 or N[x][y] > 3):
Z[x][y] = 0
elif Z[x][y] == 0 and N[x][y] == 3:
Z[x][y] = 1
return Z
if __name__ == '__main__':
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
Z = [[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]
figure = plt.figure(figsize=(12, 3))
labels = ("Initial state",
"iteration 1", "iteration 2",
"iteration 3", "iteration 4")
for i in range(5):
ax = plt.subplot(1, 5, i+1, aspect=1, frameon=False)
for x in range(1, 5):
for y in range(1, 5):
if Z[x][y] == 1:
facecolor = 'black'
else:
facecolor = 'white'
rect = Rectangle((x, 5-y), width=0.9, height=0.9,
linewidth=1.0, edgecolor='black',
facecolor=facecolor)
ax.add_patch(rect)
ax.set_xlim(.9, 5.1)
ax.set_ylim(.9, 5.1)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(labels[i])
for tick in ax.xaxis.get_major_ticks():
tick.tick1On = tick.tick2On = False
for tick in ax.yaxis.get_major_ticks():
tick.tick1On = tick.tick2On = False
iterate(Z)
plt.tight_layout()
plt.savefig("glider.png")
plt.show()
| mit |
CDSFinance/zipline | tests/test_rolling_panel.py | 12 | 7118 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import deque
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from zipline.utils.data import MutableIndexRollingPanel, RollingPanel
from zipline.finance.trading import TradingEnvironment
class TestRollingPanel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
@classmethod
def tearDownClass(cls):
del cls.env
def test_alignment(self):
items = ('a', 'b')
sids = (1, 2)
dts = self.env.market_minute_window(
self.env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts[2:],
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
rp.extend_back(dts[:-2])
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts,
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
def test_get_current_multiple_call_same_tick(self):
"""
        In the old get_current, each call to get_current would copy the data, so
        changing the returned object had no side effects.
        To keep the same API, make sure that the raw option returns a copy too.
"""
def data_id(values):
return values.__array_interface__['data']
items = ('a', 'b')
sids = (1, 2)
dts = self.env.market_minute_window(
self.env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
        # each get_current call makes a copy
cur = rp.get_current()
cur2 = rp.get_current()
assert data_id(cur.values) != data_id(cur2.values)
# make sure raw follow same logic
raw = rp.get_current(raw=True)
raw2 = rp.get_current(raw=True)
assert data_id(raw) != data_id(raw2)
class TestMutableIndexRollingPanel(unittest.TestCase):
def test_basics(self, window=10):
items = ['bar', 'baz', 'foo']
minor = ['A', 'B', 'C', 'D']
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=30, tz='utc')
major_deque = deque(maxlen=window)
frames = {}
for i, date in enumerate(dates):
frame = pd.DataFrame(np.random.randn(3, 4), index=items,
columns=minor)
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
result = rp.get_current()
expected = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
tm.assert_panel_equal(result, expected.swapaxes(0, 1))
def test_adding_and_dropping_items(self, n_items=5, n_minor=10, window=10,
periods=30):
np.random.seed(123)
items = deque(range(n_items))
minor = deque(range(n_minor))
expected_items = deque(range(n_items))
expected_minor = deque(range(n_minor))
first_non_existant = max(n_items, n_minor) + 1
        # We want to add new columns in random order
add_items = np.arange(first_non_existant, first_non_existant + periods)
np.random.shuffle(add_items)
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
frames = {}
expected_frames = deque(maxlen=window)
expected_dates = deque()
for i, (date, add_item) in enumerate(zip(dates, add_items)):
frame = pd.DataFrame(np.random.randn(n_items, n_minor),
index=items, columns=minor)
if i >= window:
# Old labels and dates should start to get dropped at every
# call
del frames[expected_dates.popleft()]
expected_minor.popleft()
expected_items.popleft()
expected_frames.append(frame)
expected_dates.append(date)
rp.add_frame(date, frame)
frames[date] = frame
result = rp.get_current()
np.testing.assert_array_equal(sorted(result.minor_axis.values),
sorted(expected_minor))
np.testing.assert_array_equal(sorted(result.items.values),
sorted(expected_items))
tm.assert_frame_equal(frame.T,
result.ix[frame.index, -1, frame.columns])
expected_result = pd.Panel(frames).swapaxes(0, 1)
tm.assert_panel_equal(expected_result,
result)
# Insert new items
minor.popleft()
minor.append(add_item)
items.popleft()
items.append(add_item)
expected_minor.append(add_item)
expected_items.append(add_item)
| apache-2.0 |
anurag313/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
otmaneJai/Zipline | zipline/utils/test_utils.py | 6 | 9495 | from contextlib import contextmanager
from itertools import (
product,
)
from logbook import FileHandler
from mock import patch
from numpy.testing import assert_array_equal
import operator
from zipline.finance.blotter import ORDER_STATUS
from zipline.utils import security_list
from six import (
itervalues,
)
from six.moves import filter
import os
import pandas as pd
import shutil
import tempfile
EPOCH = pd.Timestamp(0, tz='UTC')
def seconds_to_timestamp(seconds):
return pd.Timestamp(seconds, unit='s', tz='UTC')
def to_utc(time_str):
"""Convert a string in US/Eastern time to UTC"""
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
def str_to_seconds(s):
"""
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400
"""
return int((pd.Timestamp(s, tz='UTC') - EPOCH).total_seconds())
def setup_logger(test, path='test.log'):
test.log_handler = FileHandler(path)
test.log_handler.push_application()
def teardown_logger(test):
test.log_handler.pop_application()
test.log_handler.close()
def drain_zipline(test, zipline):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in zipline:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def assert_single_position(test, zipline):
output, transaction_count = drain_zipline(test, zipline)
if 'expected_transactions' in test.zipline_test_config:
test.assertEqual(
test.zipline_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.zipline_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.zipline_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
class ExceptionSource(object):
def __init__(self):
pass
def get_hash(self):
return "ExceptionSource"
def __iter__(self):
return self
def next(self):
5 / 0
def __next__(self):
5 / 0
@contextmanager
def security_list_copy():
old_dir = security_list.SECURITY_LISTS_DIR
new_dir = tempfile.mkdtemp()
try:
for subdir in os.listdir(old_dir):
shutil.copytree(os.path.join(old_dir, subdir),
os.path.join(new_dir, subdir))
with patch.object(security_list, 'SECURITY_LISTS_DIR', new_dir), \
patch.object(security_list, 'using_copy', True,
create=True):
yield
finally:
shutil.rmtree(new_dir, True)
def add_security_data(adds, deletes):
if not hasattr(security_list, 'using_copy'):
raise Exception('add_security_data must be used within '
'security_list_copy context')
directory = os.path.join(
security_list.SECURITY_LISTS_DIR,
"leveraged_etf_list/20150127/20150125"
)
if not os.path.exists(directory):
os.makedirs(directory)
del_path = os.path.join(directory, "delete")
with open(del_path, 'w') as f:
for sym in deletes:
f.write(sym)
f.write('\n')
add_path = os.path.join(directory, "add")
with open(add_path, 'w') as f:
for sym in adds:
f.write(sym)
f.write('\n')
def all_pairs_matching_predicate(values, pred):
"""
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from zipline.utils.test_utils import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
"""
return filter(lambda pair: pred(*pair), product(values, repeat=2))
def product_upper_triangle(values, include_diagonal=False):
"""
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1.
"""
return all_pairs_matching_predicate(
values,
operator.le if include_diagonal else operator.lt,
)
def all_subindices(index):
"""
Return all valid sub-indices of a pandas Index.
"""
return (
index[start:stop]
for start, stop in product_upper_triangle(range(len(index) + 1))
)
def make_rotating_asset_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
        Create a new asset every `frequency` * `periods_between_starts` periods.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'sid': range(num_assets),
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'asset_type': ['equity'] * num_assets,
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': 'TEST',
}
)
def make_simple_asset_info(assets, start_date, end_date, symbols=None):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
assets : array-like
start_date : pd.Timestamp
end_date : pd.Timestamp
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(assets)
if symbols is None:
symbols = [chr(ord('A') + i) for i in range(num_assets)]
return pd.DataFrame(
{
'sid': assets,
'symbol': symbols,
'asset_type': ['equity'] * num_assets,
'start_date': [start_date] * num_assets,
'end_date': [end_date] * num_assets,
'exchange': 'TEST',
}
)
def check_arrays(left, right, err_msg='', verbose=True):
"""
Wrapper around np.assert_array_equal that also verifies that inputs are
ndarrays.
See Also
--------
np.assert_array_equal
"""
if type(left) != type(right):
raise AssertionError("%s != %s" % (type(left), type(right)))
    return assert_array_equal(left, right, err_msg=err_msg, verbose=verbose)
class UnexpectedAttributeAccess(Exception):
pass
class ExplodingObject(object):
"""
Object that will raise an exception on any attribute access.
Useful for verifying that an object is never touched during a
function/method call.
"""
def __getattribute__(self, name):
raise UnexpectedAttributeAccess(name)
| apache-2.0 |
awsteiner/o2sclpy | o2sclpy/plot_base.py | 1 | 82023 | # -------------------------------------------------------------------
#
# Copyright (C) 2006-2020, Andrew W. Steiner
#
# This file is part of O2sclpy.
#
# O2sclpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# O2sclpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with O2sclpy. If not, see <http://www.gnu.org/licenses/>.
#
# -------------------------------------------------------------------
#
import math
import numpy
import os
# For system type detection
import platform
# To create new color maps
from matplotlib.colors import LinearSegmentedColormap
# For rectangles and ellipses
import matplotlib.patches as patches
from o2sclpy.utils import parse_arguments, string_to_dict
from o2sclpy.utils import force_bytes, default_plot, get_str_array
class plot_base:
"""
A base class for plotting classes :py:class:`o2sclpy.plotter` and
:py:class:`o2sclpy.o2graph_plotter` . The principal purpose
of this class is just to provide some additional simplification
to python code which makes plots using matplotlib.
"""
cbar=0
"""
Colorbar?
"""
last_image=0
"""
The last image object created (used for addcbar)
"""
axes=0
"""
Axis object
"""
axes_dict={}
"""
Dictionary of axis objects
"""
fig=0
"""
Figure object
"""
canvas_flag=False
"""
If True, then the figure and axes objects have been created
(default False)
"""
# Quantities modified by set/get
logx=False
"""
If True, then use a logarithmic x axis (default False)
"""
logy=False
"""
If True, then use a logarithmic y axis (default False)
"""
logz=False
"""
If True, then use a logarithmic z axis (default False)
"""
xlo=0
"""
Lower limit for x axis (default 0)
"""
xhi=0
"""
Upper limit for x axis (default 0)
"""
xset=False
"""
If True, then the x axis limits have been set (default False)
"""
ylo=0
"""
Lower limit for y axis (default 0)
"""
yhi=0
"""
Upper limit for y axis (default 0)
"""
yset=False
"""
If True, then the y axis limits have been set (default False)
"""
zlo=0
"""
Lower limit for z axis (default 0)
"""
zhi=0
"""
Upper limit for z axis (default 0)
"""
zset=False
"""
If True, then the z axis limits have been set (default False)
"""
verbose=1
"""
Verbosity parameter (default 1)
"""
colbar=False
"""
If True, then include a color legend for density plots (default False)
"""
left_margin=0.14
"""
Left plot margin (default 0.14)
"""
right_margin=0.04
"""
Right plot margin (default 0.04)
"""
top_margin=0.04
"""
Top plot margin (default 0.04)
"""
bottom_margin=0.12
"""
Bottom plot margin (default 0.12)
"""
font=16
"""
Font size for :py:func:`o2sclpy.plot_base.text()`,
:py:func:`o2sclpy.plot_base.ttext()`, and axis titles (default
16). Axis labels are set by this size times 0.8 .
"""
fig_dict=''
"""
A dictionary which refers to the figure and axis defaults for
:py:func:`o2sclpy.default_plot()`. The default value is
``('fig_size_x=6.0,fig_size_y=6.0,ticks_in=False,'+
'rt_ticks=False,left_margin=0.14,right_margin=0.04,'+
'bottom_margin=0.12,top_margin=0.04,fontsize=16')`` . The x and y
sizes of the figure object are in fig_size_x and fig_size_y. The
value ticks_in refers to whether or not the ticks are inside or
outside the plot. The value of rt_ticks refers to whether or not
tick marks are plotted on the right and top sides of the plot. The
font size parameter is multiplied by 0.8 and then used for the
axis labels.
"""
ticks_in=False
"""
If true, move the ticks inside (default False)
"""
rt_ticks=False
"""
If true, include ticks on right side and top (default False)
"""
editor=False
"""
If true, open the editor
"""
ax_left_panel=0
ax_right_panel=0
def __init__(self):
"""
        Create the plotting object and register the additional colormaps.
"""
self.new_cmaps()
def new_cmaps(self):
"""
Add a few new colormaps
"""
import matplotlib.pyplot as plot
if 'jet2' not in plot.colormaps():
# LinearSegmentedColormap
#
# Each row in the table for a given color is a sequence of x,
# y0, y1 tuples. In each sequence, x must increase
# monotonically from 0 to 1. For any input value z falling
# between x[i] and x[i+1], the output value of a given color
# will be linearly interpolated between y1[i] and y0[i+1]:
# Hence y0 in the first row and y1 in the last row are never used.
# This value is used to indicate values in the colormap
# tuples that are ignored by LinearSegmentedColormap()
unused=0.0
# A white to red colormap
cdict={'red': ((0.0,unused,1.0),(1.0,1.0,unused)),
'green': ((0.0,unused,1.0),(1.0,0.0,unused)),
'blue': ((0.0,unused,1.0),(1.0,0.0,unused))}
reds2=LinearSegmentedColormap('reds2',cdict)
plot.register_cmap(cmap=reds2)
# Colormap reds2, reversed
reds2_r=reds2.reversed()
plot.register_cmap(cmap=reds2_r)
# A new version of the ``jet`` colormap which starts with
# white instead of blue. In order, the index colors are white,
# blue, green, yellow, orange, and red
cdict={'red': ((0.0,unused,1.0),(0.2,0.0,0.0),
(0.4,0.0,0.0),(0.6,1.0,1.0),
(0.8,1.0,1.0),(1.0,1.0,unused)),
'green': ((0.0,unused,1.0),(0.2,0.0,0.0),
(0.4,0.5,0.5),(0.6,1.0,1.0),
(0.8,0.6,0.6),(1.0,0.0,unused)),
'blue': ((0.0,unused,1.0),(0.2,1.0,1.0),
(0.4,0.0,0.0),(0.6,0.0,0.0),
(0.8,0.0,0.0),(1.0,0.0,unused))}
jet2=LinearSegmentedColormap('jet2',cdict)
plot.register_cmap(cmap=jet2)
# Colormap jet2, reversed
jet2_r=jet2.reversed()
plot.register_cmap(cmap=jet2_r)
# A new version of the ``pastel`` colormap which starts with
# white instead of blue. In order, the index colors are white,
# blue, green, yellow, orange, and red
cdict={'red': ((0.0,unused,1.0),(0.2,0.3,0.3),
(0.4,0.3,0.3),(0.6,1.0,1.0),
(0.8,1.0,1.0),(1.0,1.0,1.0)),
'green': ((0.0,unused,1.0),(0.2,0.3,unused),
(0.4,0.5,0.5),(0.6,1.0,1.0),
(0.8,0.6,0.6),(1.0,0.3,unused)),
'blue': ((0.0,unused,1.0),(0.2,1.0,1.0),
(0.4,0.3,0.3),(0.6,0.3,0.3),
(0.8,0.3,0.3),(1.0,0.3,unused))}
pastel2=LinearSegmentedColormap('pastel2',cdict)
plot.register_cmap(cmap=pastel2)
# Colormap pastel2, reversed
pastel2_r=pastel2.reversed()
plot.register_cmap(cmap=pastel2_r)
# A white to green colormap
cdict={'red': ((0.0,unused,1.0),(1.0,0.0,unused)),
'green': ((0.0,unused,1.0),(1.0,1.0,unused)),
'blue': ((0.0,unused,1.0),(1.0,0.0,unused))}
greens2=LinearSegmentedColormap('greens2',cdict)
plot.register_cmap(cmap=greens2)
# Colormap greens2, reversed
greens2_r=greens2.reversed()
plot.register_cmap(cmap=greens2_r)
# A white to blue colormap
cdict={'red': ((0.0,unused,1.0),(1.0,0.0,unused)),
'green': ((0.0,unused,1.0),(1.0,0.0,unused)),
'blue': ((0.0,unused,1.0),(1.0,1.0,unused))}
blues2=LinearSegmentedColormap('blues2',cdict)
plot.register_cmap(cmap=blues2)
# Colormap blues2, reversed
blues2_r=blues2.reversed()
plot.register_cmap(cmap=blues2_r)
# End of function plot_base::new_cmaps()
return
def set(self,name,value):
"""
Set the value of parameter named ``name`` to value ``value``
"""
import matplotlib.pyplot as plot
if name=='logx':
if value=='False' or value=='0':
self.logx=False
else:
self.logx=True
elif name=='logy':
if value=='False' or value=='0':
self.logy=False
else:
self.logy=True
elif name=='logz':
if value=='False' or value=='0':
self.logz=False
else:
self.logz=True
elif name=='xlo':
if value[0]=='(':
self.xlo=float(eval(value))
else:
self.xlo=float(value)
self.xset=True
elif name=='xhi':
if value[0]=='(':
self.xhi=float(eval(value))
else:
self.xhi=float(value)
self.xset=True
elif name=='xset':
if value=='False' or value=='0':
self.xset=False
else:
self.xset=True
elif name=='ylo':
if value[0]=='(':
self.ylo=float(eval(value))
else:
self.ylo=float(value)
self.yset=True
elif name=='yhi':
if value[0]=='(':
self.yhi=float(eval(value))
else:
self.yhi=float(value)
self.yset=True
        elif name=='yset':
            if value=='False' or value=='0':
                self.yset=False
            else:
                self.yset=True
elif name=='zlo':
if value[0]=='(':
self.zlo=float(eval(value))
else:
self.zlo=float(value)
self.zset=True
elif name=='zhi':
if value[0]=='(':
self.zhi=float(eval(value))
else:
self.zhi=float(value)
self.zset=True
elif name=='zset':
if value=='False' or value=='0':
self.zset=False
else:
self.zset=True
elif name=='usetex':
if value=='False' or value=='0':
self.usetex=False
plot.rc('text',usetex=False)
else:
self.usetex=True
plot.rc('text',usetex=True)
elif name=='editor':
if value=='False' or value=='0':
self.editor=False
else:
self.editor=True
elif name=='verbose':
self.verbose=int(value)
elif name=='colbar':
if value=='False' or value=='0':
self.colbar=False
else:
self.colbar=True
elif name=='font':
self.font=float(value)
elif name=='fig_dict':
self.fig_dict=value
elif name=='yt_resolution':
# Remove parenthesis
left_paren=value.find('(')
right_paren=value.find(')')
value=value[left_paren+1:right_paren]
# Then split into two values
value=value.split(',')
# And reformat as a list
self.yt_resolution=(int(value[0]),int(value[1]))
elif name=='yt_focus':
# We leave the focus as a string so we can parse
# it later
self.yt_focus=value
elif name=='yt_sigma_clip':
self.yt_sigma_clip=float(value)
elif name=='yt_position':
# We leave the position as a string so we can parse
# it later
self.yt_position=value
elif name=='yt_north':
# We leave the north as a string so we can parse
# it later
self.yt_north=value
elif name=='yt_width':
# We leave the width as a string so we can parse
# it later
self.yt_width=value
elif name=='yt_filter':
self.yt_filter=value
elif name=='yt_path':
self.yt_path=value
else:
print('No variable named',name)
if self.verbose>0:
print('Set',name,'to',value)
# End of function plot_base::set()
return
def get(self,name):
"""
Output the value of parameter named ``name``
"""
if name=='colbar':
print('The value of colbar is',self.colbar,'.')
if name=='logx':
print('The value of logx is',self.logx,'.')
if name=='logy':
print('The value of logy is',self.logy,'.')
if name=='logz':
print('The value of logz is',self.logz,'.')
if name=='verbose':
print('The value of verbose is',self.verbose,'.')
if name=='xhi':
print('The value of xhi is',self.xhi,'.')
if name=='xlo':
print('The value of xlo is',self.xlo,'.')
if name=='xset':
print('The value of xset is',self.xset,'.')
if name=='yhi':
print('The value of yhi is',self.yhi,'.')
if name=='ylo':
print('The value of ylo is',self.ylo,'.')
if name=='yset':
print('The value of yset is',self.yset,'.')
if name=='zhi':
print('The value of zhi is',self.zhi,'.')
if name=='zlo':
print('The value of zlo is',self.zlo,'.')
if name=='zset':
print('The value of zset is',self.zset,'.')
if name=='fig_dict':
print('The value of fig_dict is',self.fig_dict,'.')
if name=='yt_axis':
print('The value of yt_axis is',self.yt_axis,'.')
if name=='yt_axis_color':
print('The value of yt_axis_color is',self.yt_axis_color,'.')
if name=='yt_axis_labels_flat':
print('The value of yt_axis_labels_flat is',
self.yt_axis_labels_flat,'.')
if name=='yt_axis_resolution':
print('The value of yt_axis_resolution is',
self.yt_axis_resolution,'.')
if name=='yt_focus':
print('The value of yt_focus is',self.yt_focus,'.')
if name=='yt_sigma_clip':
print('The value of yt_sigma_clip is',self.yt_sigma_clip,'.')
if name=='yt_position':
print('The value of yt_position is',self.yt_position,'.')
if name=='yt_path':
print('The value of yt_path is',self.yt_path,'.')
# End of function plot_base::get()
return
def xlimits(self,xlo,xhi):
"""
Set the x-axis limits
"""
if xlo==xhi:
self.xset=False
return
self.xlo=xlo
self.xhi=xhi
self.xset=True
if self.canvas_flag==True:
self.axes.set_xlim(self.xlo,self.xhi)
if self.logx==True:
self.axes.set_xscale('log')
else:
self.axes.set_xscale('linear')
# End of function plot_base::xlimits()
return
def ylimits(self,ylo,yhi):
"""
Set the y-axis limits
"""
if ylo==yhi:
self.yset=False
return
self.ylo=ylo
self.yhi=yhi
self.yset=True
if self.canvas_flag==True:
self.axes.set_ylim(self.ylo,self.yhi)
if self.logy==True:
self.axes.set_yscale('log')
else:
self.axes.set_yscale('linear')
# End of function plot_base::ylimits()
return
def zlimits(self,zlo,zhi):
"""
Set the z-axis limits
"""
if zlo==zhi:
self.zset=False
return
self.zlo=zlo
self.zhi=zhi
self.zset=True
#if self.canvas_flag==True:
#plot.zlim([zlo,zhi])
# End of function plot_base::zlimits()
return
def line(self,x1,y1,x2,y2,**kwargs):
"""
Plot a line from :math:`(x_1,y_1)` to :math:`(x_2,y_2)`
"""
if self.verbose>2:
print('Line',x1,y1,x2,y1)
if self.canvas_flag==False:
self.canvas()
if isinstance(x1,str):
x1=float(eval(x1))
if isinstance(y1,str):
y1=float(eval(y1))
if isinstance(x2,str):
x2=float(eval(x2))
if isinstance(y2,str):
y2=float(eval(y2))
self.axes.plot([x1,x2],[y1,y2],**kwargs)
# End of function plot_base::line()
return
def arrow(self,x1,y1,x2,y2,arrowprops,**kwargs):
"""
Plot an arrow from :math:`(x_1,y_1)` to :math:`(x_2,y_2)`
"""
if self.verbose>2:
print('Arrow',x1,y1,x2,y1,arrowprops)
if self.canvas_flag==False:
self.canvas()
self.axes.annotate("",xy=(float(eval(x2)),float(eval(y2))),
xycoords='data',
xytext=(float(eval(x1)),float(eval(y1))),
textcoords='data',
arrowprops=string_to_dict(arrowprops))
# End of function plot_base::arrow()
return
def point(self,xval,yval,**kwargs):
"""
Plot a point at location (xval,yval)
"""
if self.verbose>2:
print('point',xval,yval,kwargs)
if self.canvas_flag==False:
self.canvas()
if isinstance(xval,str):
xval=float(eval(xval))
if isinstance(yval,str):
yval=float(eval(yval))
self.axes.plot([xval],[yval],**kwargs)
if self.xset==True:
self.axes.set_xlim(self.xlo,self.xhi)
if self.yset==True:
self.axes.set_ylim(self.ylo,self.yhi)
# End of function plot_base::point()
return
def error_point(self,xval,yval,err1=None,err2=None,
err3=None,err4=None,**kwargs):
"""
        Plot a point at location (xval,yval) with optional error bars
"""
if err1=='None' or err1=='none':
err1=None
if err2=='None' or err2=='none':
err2=None
if err3=='None' or err3=='none':
err3=None
if err4=='None' or err4=='none':
err4=None
if self.verbose>2:
print('error-point',xval,yval,err1,err2,err3,err4,kwargs)
if self.canvas_flag==False:
self.canvas()
if isinstance(xval,str):
xval=float(eval(xval))
if isinstance(yval,str):
yval=float(eval(yval))
if err1==None and err2==None and err3==None and err4==None:
            self.axes.plot([xval],[yval],**kwargs)
elif err3==None and err4==None:
if isinstance(err2,str):
err2=float(eval(err2))
if isinstance(err1,str):
err1=float(eval(err1))
self.axes.errorbar([xval],[yval],
yerr=[err2],xerr=[err1],**kwargs)
elif err2==None:
if isinstance(err4,str):
err4=float(eval(err4))
if isinstance(err3,str):
err3=float(eval(err3))
if isinstance(err1,str):
err1=float(eval(err1))
self.axes.errorbar([xval],[yval],
yerr=[[err3],[err4]],
xerr=[err1],**kwargs)
elif err4==None:
if isinstance(err2,str):
err2=float(eval(err2))
if isinstance(err1,str):
err1=float(eval(err1))
if isinstance(err3,str):
err3=float(eval(err3))
self.axes.errorbar([xval],[yval],yerr=[err3],
xerr=[[err1],[err2]],**kwargs)
else:
if isinstance(err2,str):
err2=float(eval(err2))
if isinstance(err1,str):
err1=float(eval(err1))
if isinstance(err4,str):
err4=float(eval(err4))
if isinstance(err3,str):
err3=float(eval(err3))
self.axes.errorbar([xval],[yval],
yerr=[[err3],[err4]],
xerr=[[err1],[err2]],**kwargs)
if self.xset==True:
self.axes.set_xlim(self.xlo,self.xhi)
if self.yset==True:
self.axes.set_ylim(self.ylo,self.yhi)
# End of function plot_base::point()
return
def rect(self,x1,y1,x2,y2,angle=0,**kwargs):
"""
Plot a rectangle from :math:`(x_1,y_1)` to :math:`(x_2,y_2)`
"""
if self.verbose>2:
print('Rect',x1,y1,x2,y1)
if self.canvas_flag==False:
self.canvas()
if isinstance(x1,str):
x1=float(eval(x1))
if isinstance(x2,str):
x2=float(eval(x2))
if isinstance(y1,str):
y1=float(eval(y1))
if isinstance(y2,str):
y2=float(eval(y2))
left=x1
if x2<x1:
left=x2
lower=y1
if y2<y1:
lower=y2
w=abs(x1-x2)
h=abs(y1-y2)
if self.canvas_flag==False:
self.canvas()
r=patches.Rectangle((left,lower),w,h,angle,**kwargs)
self.axes.add_patch(r)
# End of function plot_base::rect()
return
def ellipse(self,x,y,w,h,angle=0,**kwargs):
"""
Plot an ellipse
"""
if self.verbose>2:
print('Ellipse',x,y,w,h,angle)
if self.canvas_flag==False:
self.canvas()
        # Allow either strings (which are evaluated) or numbers for the
        # ellipse parameters
        fx=float(eval(x)) if isinstance(x,str) else float(x)
        fy=float(eval(y)) if isinstance(y,str) else float(y)
        fw=float(eval(w)) if isinstance(w,str) else float(w)
        fh=float(eval(h)) if isinstance(h,str) else float(h)
        fangle=float(eval(angle)) if isinstance(angle,str) else float(angle)
        if self.canvas_flag==False:
            self.canvas()
        r=patches.Ellipse((fx,fy),fw,fh,angle=fangle,**kwargs)
self.axes.add_patch(r)
# End of function plot_base::ellipse()
return
def show(self):
"""
Call the ``matplotlib`` show function.
"""
import matplotlib.pyplot as plot
if self.editor:
def disable(ax):
b=ax.get_position().bounds
if b[0]<1:
b2=[b[0]+1,b[1],b[2],b[3]]
else:
b2=[b[0],b[1],b[2],b[3]]
ax.set_position(b2)
return
def enable(ax):
b=ax.get_position().bounds
if b[0]>1:
b2=[b[0]-1,b[1],b[2],b[3]]
else:
b2=[b[0],b[1],b[2],b[3]]
ax.set_position(b2)
return
from matplotlib.widgets import Button, Slider, TextBox
plot.rc('text',usetex=False)
title=r'$ \mathrm{O}_2\mathrm{graph~Plot~Editor}$'
editor_title=self.ax_right_panel.text(0.02,0.955,title,
ha='left',va='center',
fontsize=16)
inst_text=('Begin by selecting a plot element above '+
'to modify.')
instructions=self.ax_right_panel.text(0.02,0.91,inst_text,
ha='left',va='center',
fontsize=16)
# The 'close' button
ax_close_button=plot.axes([0.915,0.93,0.08,0.06])
close_button=Button(ax_close_button,r'close')
close_button.label.set_size(14)
# The 'figure' button
ax_figure_button=plot.axes([0.505,0.81,0.1,0.06])
figure_button=Button(ax_figure_button,'figure')
figure_button.label.set_size(14)
# The 'axes' button
ax_axes_button=plot.axes([0.715,0.81,0.1,0.06])
axes_button=Button(ax_axes_button,'axes')
axes_button.label.set_size(14)
# The 'subplot' button
ax_subplot_button=plot.axes([0.61,0.81,0.1,0.06])
subplot_button=Button(ax_subplot_button,'subplot')
subplot_button.label.set_size(14)
# The 'text' button
ax_text_button=plot.axes([0.82,0.81,0.1,0.06])
text_button=Button(ax_text_button,'text')
text_button.label.set_size(14)
# figure margin sliders
ax_left_margin_slider=plot.axes([0.63,0.7,0.30,0.05],
facecolor='#bbbbbb')
left_margin_slider=Slider(ax_left_margin_slider,'left margin',
0,0.5,valinit=self.left_margin,
valstep=0.005,color='#777777')
left_margin_slider.label.set_size(14)
left_margin_slider.valtext.set_size(14)
ax_right_margin_slider=plot.axes([0.63,0.58,0.30,0.05],
facecolor='#bbbbbb')
right_margin_slider=Slider(ax_right_margin_slider,'right margin',
0,0.5,valinit=self.right_margin,
valstep=0.005,color='#777777')
right_margin_slider.label.set_size(14)
right_margin_slider.valtext.set_size(14)
ax_top_margin_slider=plot.axes([0.63,0.52,0.30,0.05],
facecolor='#bbbbbb')
top_margin_slider=Slider(ax_top_margin_slider,'top margin',
0,0.5,valinit=self.top_margin,
valstep=0.005,color='#777777')
top_margin_slider.label.set_size(14)
top_margin_slider.valtext.set_size(14)
ax_bottom_margin_slider=plot.axes([0.63,0.64,0.30,0.05],
facecolor='#bbbbbb')
bottom_margin_slider=Slider(ax_bottom_margin_slider,'bottom margin',
0,0.5,valinit=self.bottom_margin,
valstep=0.005,color='#777777')
bottom_margin_slider.label.set_size(14)
bottom_margin_slider.valtext.set_size(14)
disable(ax_left_margin_slider)
disable(ax_right_margin_slider)
disable(ax_top_margin_slider)
disable(ax_bottom_margin_slider)
# axis limit text boxes
ax_xlo_tbox=plot.axes([0.63,0.7,0.30,0.05])
ax_xhi_tbox=plot.axes([0.63,0.64,0.30,0.05])
if self.xset==False:
limt=self.axes.get_xlim()
self.xlo=limt[0]
self.xhi=limt[1]
xlo_tbox=TextBox(ax_xlo_tbox,'x low:',
initial=('%5.4g' % self.xlo))
xhi_tbox=TextBox(ax_xhi_tbox,'x high:',
initial=('%5.4g' % self.xhi))
xlo_tbox.label.set_size(14)
xhi_tbox.label.set_size(14)
ax_ylo_tbox=plot.axes([0.63,0.58,0.30,0.05])
ax_yhi_tbox=plot.axes([0.63,0.52,0.30,0.05])
if self.yset==False:
limt=self.axes.get_ylim()
self.ylo=limt[0]
self.yhi=limt[1]
ylo_tbox=TextBox(ax_ylo_tbox,'y low:',
initial=('%5.4g' % self.ylo))
yhi_tbox=TextBox(ax_yhi_tbox,'y high:',
initial=('%5.4g' % self.yhi))
ylo_tbox.label.set_size(14)
yhi_tbox.label.set_size(14)
disable(ax_xlo_tbox)
disable(ax_xhi_tbox)
disable(ax_ylo_tbox)
disable(ax_yhi_tbox)
# Callback for 'close editor' button
def close_editor(event):
self.fig.set_size_inches(6,6,forward=True)
disable(ax_close_button)
disable(ax_figure_button)
disable(ax_axes_button)
disable(ax_text_button)
disable(ax_subplot_button)
#ax_close_button.set_position([1.1,1.1,0.1,0.1])
#ax_figure_button.set_position([1.1,1.1,0.1,0.1])
#ax_axes_button.set_position([1.1,1.1,0.1,0.1])
#ax_text_button.set_position([1.1,1.1,0.1,0.1])
#ax_subplot_button.set_position([1.1,1.1,0.1,0.1])
disable(ax_xlo_tbox)
disable(ax_xhi_tbox)
disable(ax_ylo_tbox)
disable(ax_yhi_tbox)
#ax_xlo_tbox.set_position([1,1,0.111,0.111])
#ax_xhi_tbox.set_position([1,1,0.112,0.112])
#ax_ylo_tbox.set_position([1,1,0.113,0.113])
#ax_yhi_tbox.set_position([1,1,0.114,0.114])
disable(ax_left_margin_slider)
disable(ax_right_margin_slider)
disable(ax_top_margin_slider)
disable(ax_bottom_margin_slider)
#ax_left_margin_slider.set_position([1,1,0.103,0.103])
#ax_bottom_margin_slider.set_position([1,1,0.104,0.104])
#ax_right_margin_slider.set_position([1,1,0.105,0.105])
#ax_top_margin_slider.set_position([1,1,0.106,0.106])
self.ax_left_panel.set_position([0,0,1,1])
self.ax_right_panel.set_position([1,1,1,1])
self.axes.set_position([self.left_margin,
self.bottom_margin,
1.0-self.left_margin-self.right_margin,
1.0-self.top_margin-self.bottom_margin])
self.fig.canvas.draw_idle()
return
def figure_editor(event):
instructions.set_text('')
enable(ax_left_margin_slider)
enable(ax_right_margin_slider)
enable(ax_top_margin_slider)
enable(ax_bottom_margin_slider)
disable(ax_xlo_tbox)
disable(ax_xhi_tbox)
disable(ax_ylo_tbox)
disable(ax_yhi_tbox)
self.fig.canvas.draw_idle()
return
def axes_editor(event):
instructions.set_text('')
disable(ax_left_margin_slider)
disable(ax_right_margin_slider)
disable(ax_top_margin_slider)
disable(ax_bottom_margin_slider)
enable(ax_xlo_tbox)
enable(ax_xhi_tbox)
enable(ax_ylo_tbox)
enable(ax_yhi_tbox)
self.fig.canvas.draw_idle()
return
def margin_update():
self.axes.set_position([self.left_margin/2.0,
self.bottom_margin,
(1.0-self.left_margin-
self.right_margin)/2.0,
1.0-self.top_margin-self.bottom_margin])
self.fig.canvas.draw_idle()
return
def left_margin_update(val):
self.left_margin=val
margin_update()
return
def right_margin_update(val):
self.right_margin=val
margin_update()
return
def bottom_margin_update(val):
self.bottom_margin=val
margin_update()
return
def top_margin_update(val):
self.top_margin=val
margin_update()
return
            def xlim_lo_update(val):
                # Store the new limit so that later edits to the other
                # bound do not revert this one
                self.xlo=float(val)
                self.axes.set_xlim([self.xlo,self.xhi])
                return
            def xlim_hi_update(val):
                self.xhi=float(val)
                self.axes.set_xlim([self.xlo,self.xhi])
                return
            def ylim_lo_update(val):
                self.ylo=float(val)
                self.axes.set_ylim([self.ylo,self.yhi])
                return
            def ylim_hi_update(val):
                self.yhi=float(val)
                self.axes.set_ylim([self.ylo,self.yhi])
                return
close_button.on_clicked(close_editor)
figure_button.on_clicked(figure_editor)
axes_button.on_clicked(axes_editor)
left_margin_slider.on_changed(left_margin_update)
right_margin_slider.on_changed(right_margin_update)
top_margin_slider.on_changed(top_margin_update)
bottom_margin_slider.on_changed(bottom_margin_update)
xlo_tbox.on_submit(xlim_lo_update)
xhi_tbox.on_submit(xlim_hi_update)
ylo_tbox.on_submit(ylim_lo_update)
yhi_tbox.on_submit(ylim_hi_update)
plot.rc('text',usetex=True)
plot.show()
# End of function plot_base::show()
return
def save(self,filename):
"""
Save plot to file named ``filename``. If the verbose parameter is
greater than zero, then this function prints the filename to
the screen.
"""
import matplotlib.pyplot as plot
if self.verbose>0:
print('Saving as',filename,'.')
plot.savefig(filename)
# End of function plot_base::save()
return
def ttext(self,tx,ty,textstr,**kwargs):
"""
        Plot text in the axes coordinate system (the transAxes
        transformation, where (0,0) is the lower-left and (1,1) is the
        upper-right corner of the axes). This function uses the class
        font size and centers the text in the horizontal and vertical
        directions by default. A figure and axes are created using
:py:func:`o2sclpy.plot_base.canvas()`, if they have not been
created already. If ``tx`` and ``ty`` are strings, then they
are passed through the ``eval()`` function and converted to
floating-point numbers.
"""
if self.canvas_flag==False:
self.canvas()
ha_present=False
for key in kwargs:
if key=='ha' or key=='horizontalalignment':
ha_present=True
if ha_present==False:
kwargs=dict(kwargs,ha='center')
va_present=False
for key in kwargs:
if key=='va' or key=='verticalalignment':
va_present=True
if va_present==False:
kwargs=dict(kwargs,va='center')
fontsize_present=False
for key in kwargs:
if key=='fontsize':
fontsize_present=True
if fontsize_present==False:
kwargs=dict(kwargs,fontsize=self.font)
transform_present=False
for key in kwargs:
if key=='transform':
transform_present=True
if transform_present==False:
kwargs=dict(kwargs,transform=self.axes.transAxes)
if isinstance(tx,str):
tx=float(eval(tx))
if isinstance(ty,str):
ty=float(eval(ty))
self.axes.text(tx,ty,textstr,**kwargs)
# End of function plot_base::ttext()
return
def text(self,tx,ty,textstr,**kwargs):
"""Plot text in the axis coordinate system transforming using the
class font size and and centering in the horizontal and
vertical directions by default. A figure and axes are created
using :py:func:`o2sclpy.plot_base.canvas()`, if they have not
been created already. If ``tx`` and ``ty`` are strings, then
they are passed through the ``eval()`` function and converted
to floating-point numbers.
"""
if self.canvas_flag==False:
self.canvas()
ha_present=False
for key in kwargs:
if key=='ha' or key=='horizontalalignment':
ha_present=True
if ha_present==False:
kwargs=dict(kwargs,ha='center')
va_present=False
for key in kwargs:
if key=='va' or key=='verticalalignment':
va_present=True
if va_present==False:
kwargs=dict(kwargs,va='center')
fontsize_present=False
for key in kwargs:
if key=='fontsize':
fontsize_present=True
if fontsize_present==False:
kwargs=dict(kwargs,fontsize=self.font)
if isinstance(tx,str):
tx=float(eval(tx))
if isinstance(ty,str):
ty=float(eval(ty))
self.axes.text(tx,ty,textstr,**kwargs)
# End of function plot_base::text()
return
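    # Illustrative sketch (assumed plot_base instance ``pb``; the values
    # are hypothetical): ttext() places text in axes-fraction
    # coordinates, while text() uses data coordinates, e.g.
    #
    #   pb.ttext(0.5,0.95,'centered near the top of the axes')
    #   pb.text(2.0,3.5,'placed at the data point (2.0,3.5)')
    #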
def textbox(self,tx,ty,strt,boxprops='',**kwargs):
"""
Plot text in the axis coordinate system with a box
"""
if self.canvas_flag==False:
self.canvas()
ha_present=False
for key in kwargs:
if key=='ha':
ha_present=True
if ha_present==False:
kwargs=dict(kwargs,ha='center')
va_present=False
for key in kwargs:
if key=='va':
va_present=True
if va_present==False:
kwargs=dict(kwargs,va='center')
fontsize_present=False
for key in kwargs:
if key=='fontsize':
fontsize_present=True
if fontsize_present==False:
kwargs=dict(kwargs,fontsize=self.font)
if isinstance(tx,str):
tx=float(eval(tx))
if isinstance(ty,str):
ty=float(eval(ty))
self.axes.text(tx,ty,strt,
transform=self.axes.transAxes,
bbox=string_to_dict(boxprops),**kwargs)
# End of function plot_base::textbox()
return
def subplots(self,nr,nc=1,**kwargs):
"""
        Create ``nr`` rows and ``nc`` columns of subplots. The individual
        axis objects are extracted and stored in ``axes_dict`` under the
        keys ``subplot0``, ``subplot1``, and so on (in row-major order).
"""
import matplotlib.pyplot as plot
plot.rc('text',usetex=True)
plot.rc('font',family='serif')
plot.rcParams['lines.linewidth']=0.5
dct=string_to_dict(self.fig_dict)
if not('fig_size_x' in dct):
dct['fig_size_x']=6.0
if not('fig_size_y' in dct):
dct['fig_size_y']=6.0
# Make the call to subplots()
self.fig,axis_temp=plot.subplots(nrows=nr,ncols=nc,
figsize=(dct["fig_size_x"],
dct["fig_size_y"]))
# Reformulate the axis objects into the axes_dict
nsub=0
if nr==1 and nc==1:
self.axes_dict["subplot0"]=axis_temp
print('Created new axes named subplot0.')
nsub=1
elif nr==1:
for i in range(0,nc):
self.axes_dict["subplot"+str(i)]=axis_temp[i]
print('Created new axes named subplot'+str(i)+'.')
nsub=nc
elif nc==1:
for i in range(0,nr):
self.axes_dict["subplot"+str(i)]=axis_temp[i]
print('Created new axes named subplot'+str(i)+'.')
nsub=nr
else:
cnt=0
for i in range(0,nr):
for j in range(0,nc):
self.axes_dict["subplot"+str(cnt)]=axis_temp[i][j]
print('Created new axes named subplot'+str(cnt)+'.')
cnt=cnt+1
nsub=cnt
# Apply default preferences to the axes, similar to
# default_plot().
for i in range(0,nsub):
axt=self.axes_dict["subplot"+str(i)]
axt.minorticks_on()
axt.tick_params('both',length=12,width=1,which='major')
axt.tick_params('both',length=5,width=1,
which='minor')
axt.tick_params(labelsize=self.font*0.8)
# Flip the canvas flag
self.canvas_flag=True
# End of function plot_base::subplots()
return
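    # Usage sketch (hypothetical object name ``pb``): create a 2x2 grid
    # and then select the lower-right panel by its generated name,
    #
    #   pb.subplots(2,2)
    #   pb.selax('subplot3')
    #   pb.xtitle('x')
    #
    # selax() also accepts the single digit '3' as a shorthand.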
def xtitle(self,textstr):
"""
Add a title for the x-axis
"""
# Note that this function no longer works inside of yt
# visualizations because they need to be accessible for
# annotations on top of yt
if textstr!='' and textstr!='none':
if self.canvas_flag==False:
self.canvas()
self.axes.set_xlabel(textstr,fontsize=self.font)
# End of function plot_base::xtitle()
return
def ytitle(self,textstr):
"""
Add a title for the y-axis
"""
# Note that this function no longer works inside of yt
# visualizations because they need to be accessible for
# annotations on top of yt
if textstr!='' and textstr!='none':
if self.canvas_flag==False:
self.canvas()
self.axes.set_ylabel(textstr,fontsize=self.font)
# End of function plot_base::ytitle()
return
def selax(self,name=''):
"""
Select an axis from the current list of axes
"""
if name=='':
print('Axes names:',self.axes_dict.keys())
elif len(name)==1:
self.axes=self.axes_dict["subplot"+name]
else:
self.axes=self.axes_dict[name]
# End of function plot_base::selax()
return
def inset(self,left,bottom,width,height,**kwargs):
"""
        Create a new axes object (an inset) inside the current figure,
        using figure-fraction coordinates for ``left``, ``bottom``,
        ``width``, and ``height``.
        Useful kwargs are projection (None, 'aitoff', 'hammer', 'lambert',
        'mollweide', 'polar', 'rectilinear', or a str) and polar (True or
        False), along with many other axis kwargs (which may be difficult
        to modify in this simplified form).
"""
# Create a unique axes label i.e. inset0
ifound=9
for i in range(0,8):
if ifound==9:
axname="inset"+str(i)
if axname not in self.axes_dict:
ifound=i
axname="inset"+str(ifound)
self.axes=self.fig.add_axes([left,bottom,width,height],
label=axname)
self.axes_dict[axname]=self.axes
print('Created new axes named',axname)
# the same defaults as default_plot()
self.axes.minorticks_on()
self.axes.tick_params('both',length=12,width=1,which='major')
self.axes.tick_params('both',length=5,width=1,which='minor')
self.axes.tick_params(labelsize=self.font*0.8)
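    # Sketch of intended use (assumed, not from the original docs): add a
    # small panel in the upper-right corner of the figure using
    # figure-fraction coordinates,
    #
    #   pb.inset(0.6,0.6,0.25,0.25)
    #
    # after which plotting commands act on the new inset axes until
    # another axes object is chosen with selax().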
def modax(self,**kwargs):
"""
Modify the current axes properties
"""
import matplotlib.pyplot as plot
if 'x_major_loc' in kwargs:
self.axes.get_xaxis().set_major_locator(plot.MultipleLocator
(float(kwargs['x_major_loc'])))
if 'x_minor_loc' in kwargs:
self.axes.get_xaxis().set_minor_locator(plot.MultipleLocator
(float(kwargs['x_minor_loc'])))
if 'y_major_loc' in kwargs:
self.axes.get_yaxis().set_major_locator(plot.MultipleLocator
(float(kwargs['y_major_loc'])))
if 'y_minor_loc' in kwargs:
self.axes.get_yaxis().set_minor_locator(plot.MultipleLocator
(float(kwargs['y_minor_loc'])))
if 'x_visible' in kwargs:
if kwargs['x_visible']=='False':
self.axes.get_xaxis().set_visible(False)
if 'y_visible' in kwargs:
if kwargs['y_visible']=='False':
self.axes.get_yaxis().set_visible(False)
if 'labelsize' in kwargs:
self.axes.tick_params(labelsize=float(kwargs['labelsize']))
if 'alpha' in kwargs:
self.axes.patch.set_alpha(float(kwargs['alpha']))
if 'y_loc' in kwargs:
if kwargs['y_loc']=='rl' or kwargs['y_loc']=='lr':
self.axes.tick_params('y',which='both',
right=True,left=True,labelright=True,
labelleft=True)
elif kwargs['y_loc']=='l':
self.axes.tick_params('y',which='both',left=True,
labelleft=True,right=False,
labelright=False)
elif kwargs['y_loc']=='r':
self.axes.tick_params('y',which='both',right=True,
labelright=True,left=False,
labelleft=False)
if 'x_loc' in kwargs:
if kwargs['x_loc']=='bt' or kwargs['x_loc']=='tb':
self.axes.tick_params('x',which='both',bottom=True,top=True,
labelbottom=True,labeltop=True)
elif kwargs['x_loc']=='t':
self.axes.tick_params('x',which='both',top=True,
labeltop=True,bottom=False,
labelbottom=False)
elif kwargs['x_loc']=='b':
self.axes.tick_params('x',which='both',bottom=True,
labelbottom=True,top=False,
labeltop=False)
if 'x_tick_dir' in kwargs:
self.axes.tick_params('x',which='major',
direction=kwargs['x_tick_dir'])
if 'x_minor_tick_dir' in kwargs:
self.axes.tick_params('x',which='minor',
direction=kwargs['x_minor_tick_dir'])
if 'y_tick_dir' in kwargs:
self.axes.tick_params('y',which='major',
direction=kwargs['y_tick_dir'])
if 'y_minor_tick_dir' in kwargs:
self.axes.tick_params('y',which='minor',
direction=kwargs['y_minor_tick_dir'])
if 'x_tick_len' in kwargs:
self.axes.tick_params('x',which='major',
length=kwargs['x_tick_len'])
if 'x_minor_tick_len' in kwargs:
self.axes.tick_params('x',which='minor',
length=kwargs['x_minor_tick_len'])
if 'y_tick_len' in kwargs:
self.axes.tick_params('y',which='major',
length=float(kwargs['y_tick_len']))
if 'y_minor_tick_len' in kwargs:
self.axes.tick_params('y',which='minor',
length=float(kwargs['y_minor_tick_len']))
if 'x_tick_wid' in kwargs:
self.axes.tick_params('x',which='major',
width=kwargs['x_tick_wid'])
if 'x_minor_tick_wid' in kwargs:
self.axes.tick_params('x',which='minor',
width=kwargs['x_minor_tick_wid'])
if 'y_tick_wid' in kwargs:
self.axes.tick_params('y',which='major',
width=float(kwargs['y_tick_wid']))
if 'y_minor_tick_wid' in kwargs:
self.axes.tick_params('y',which='minor',
width=float(kwargs['y_minor_tick_wid']))
return
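    # Illustrative call (hypothetical values): put x ticks on both the
    # top and bottom edges, point the major x ticks inward, and shrink
    # the tick labels,
    #
    #   pb.modax(x_loc='tb',x_tick_dir='in',labelsize=12)
    #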
def addcbar(self,left,bottom,width,height,image='last',cmap='',**kwargs):
"""
Add a new colorbar or a colorbar from the most recently created
image at the location specified by ``left``, ``bottom``,
``width`` and ``height``. If the image keyword is 'last',
then the last density plot (command 'den-plot') or 2d
histogram plot (command 'hist2d-plot') is used. If
the image keyword is 'new', then a colormap must be
specified using the 'cmap' keyword and the color map is
used to create the colorbar.
"""
import matplotlib.pyplot as plot
# Create a unique axes label i.e. cbar0
ifound=9
for i in range(0,8):
if ifound==9:
axname="cbar"+str(i)
if axname not in self.axes_dict:
ifound=i
axname="cbar"+str(ifound)
if image=='last':
self.axes=self.fig.add_axes([left,bottom,width,height])
self.axes_dict[axname]=self.axes
print('Created new axes named',axname)
cbar=self.fig.colorbar(self.last_image,cax=self.axes,**kwargs)
cbar.ax.tick_params(labelsize=self.font*0.8)
elif image=='new':
self.axes=self.fig.add_axes([left,bottom,width,height])
# This doesn't work and I'm not quite sure why yet
#axis_temp.set_frame_on(False)
self.axes_dict[axname]=self.axes
print('Created new axes named',axname)
if cmap=='':
print('New colorbar needs colormap in addcbar().')
return
tempsm=plot.cm.ScalarMappable(cmap=cmap,
norm=plot.Normalize(vmin=0,vmax=1))
cbar=self.fig.colorbar(tempsm,cax=self.axes,
orientation='horizontal')
cbar.ax.tick_params(labelsize=0,length=0)
else:
print('Invalid value of image in addcbar().')
return
# End of function plot_base::addcbar()
return
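    # Hedged examples (hypothetical coordinates): attach a colorbar for
    # the most recent density plot, or create a standalone colorbar from
    # a named colormap,
    #
    #   pb.addcbar(0.88,0.10,0.04,0.80)
    #   pb.addcbar(0.10,0.92,0.80,0.04,image='new',cmap='viridis')
    #
    # The first form assumes a density or hist2d plot has already been
    # drawn so that a last image exists.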
def canvas(self):
"""
This function creates a default figure using default_plot()
and axis object using the xtitle and ytitle for the
axis titles and xlo, xhi, ylo, and yhi for the axis limits.
"""
if self.verbose>2:
print('Canvas',self.fig_dict)
dct=string_to_dict(self.fig_dict)
if 'fontsize' not in dct.keys():
dct['fontsize']=self.font
if self.editor:
(self.fig,self.axes,self.ax_left_panel,
self.ax_right_panel)=default_plot(**dct,editor=True)
else:
(self.fig,self.axes)=default_plot(**dct)
# Add axes object to the dictionary
self.axes_dict["main"]=self.axes
# Plot limits
if self.xset==True:
self.axes.set_xlim(self.xlo,self.xhi)
if self.yset==True:
self.axes.set_ylim(self.ylo,self.yhi)
# Set log mode for x and y axes if requested
if self.logx==True:
self.axes.set_xscale('log')
if self.logy==True:
self.axes.set_yscale('log')
self.canvas_flag=True
# End of function plot_base::canvas()
return
# def move_labels(self):
# """
# Move tick labels
# """
# for label in self.axes.get_xticklabels():
# t=label.get_position()
# t2=t[0],t[1]-0.01
# label.set_position(t2)
# label.set_fontsize(16)
# for label in self.axes.get_yticklabels():
# t=label.get_position()
# t2=t[0]-0.01,t[1]
# label.set_position(t2)
# label.set_fontsize(16)
# # End of function plot_base::move_labels()
# return
class yt_plot_base(plot_base):
"""
A base class with simplifications for plots generated in yt
"""
# yt settings modifiable by get and set
yt_filter=''
"""
Filter for yt images. If non-empty, must contain the
strings '%i' for input file and '%o' for output file. A typical
    example is something like
        convert -contrast-stretch 0 %i %o
    which uses imagemagick to adjust the color curve.
"""
yt_resolution=(512,512)
"""
Resolution for yt rendering (default (512,512))
"""
yt_focus='default'
"""
yt camera focus as a string. The string 'default' is equivalent
to '[0.5,0.5,0.5] internal'. Either in the 'internal' or 'user'
unit system.
"""
yt_position='default'
"""
yt camera position as a string. The string 'default' is equivalent
to '[1.5,0.6,0.7] internal'. Either in the 'internal' or 'user'
unit system.
"""
yt_width='default'
"""
yt camera width as a string. The string 'default' is equivalent to
'[1.5,1.5,1.5]'. Always in the internal unit system.
"""
yt_north='default'
"""
yt camera north vector string. The string 'default' is equivalent to
'[1.0,0.0,0.0]'. Always in the internal unit system.
"""
yt_sigma_clip=4.0
"""
The sigma_clip parameter for yt (default 4.0)
"""
# Other yt settings
yt_path=[]
"""
yt animation path (default []), as list of lists. The
list contains instructions such as
['yaw',100,0.01]
['zoom',100,2.0]
...
    where the first entry in each sublist is always the type of
move, and the second entry in each sublist is always the
number of frames over which to complete the move.
Note that this is not set using -set or -get but by the
'yt-path' command.
"""
yt_ann=[]
"""
Annotations for yt renders. This list is controlled by
the 'yt-ann' command.
"""
yt_trans=0
"""
Transformation for yt figure annotations
"""
yt_tf=0
"""
The yt transfer function
"""
yt_vol_keynames=[]
"""
Current list of volume keynames
"""
yt_volume_data=[]
"""
Current list of data objects for volume sources
"""
yt_volume_bbox=[]
"""
Current list of bbox arrays for volume sources
"""
yt_vols=[]
"""
Current list of volume source objects
"""
yt_data_sources=[]
"""
Current list of yt data source objects
"""
yt_text_objects=[]
"""
    Current list of yt text objects
"""
# Yt scene and camera
yt_scene=0
"""
The yt scene object
"""
yt_created_scene=False
"""
If true, then the yt scene object has been created
"""
yt_camera=0
"""
The yt camera object
"""
yt_created_camera=False
"""
If true, then the yt camera object has been created
"""
def yt_update_text(self):
"""
Update the text objects during an animation by removing them from
the scene and adding them back.
"""
for i in range(0,len(self.yt_text_objects)):
if self.yt_text_objects[i][1]==True:
# Remove previous object
del self.yt_scene.sources[self.yt_text_objects[i][0]]
# Now add it back
self.yt_text_to_scene([self.yt_text_objects[i][2],
self.yt_text_objects[i][3],
self.yt_text_objects[i][4]],
self.yt_text_objects[i][5],
textcolor=self.yt_text_objects[i][6],
dpi=self.yt_text_objects[i][7],
scale=self.yt_text_objects[i][8],
font=self.yt_text_objects[i][9],
keyname=self.yt_text_objects[i][0])
# End of function plot_base::yt_update_text()
return
def yt_line(self,point1,point2,color=[1.0,1.0,1.0,0.5],
coords='user',keyname='o2sclpy_line'):
"""
Plot a line in a yt volume visualization.
"""
from yt.visualization.volume_rendering.api \
import LineSource
x1=point1[0]
x2=point2[0]
y1=point1[1]
y2=point2[1]
z1=point1[2]
z2=point2[2]
if self.xset==False:
if x1<x2:
self.xlo=x1
self.xhi=x2
else:
self.xlo=x2
self.xhi=x1
print('Set xlimits to',self.xlo,self.xhi)
self.xset=True
if self.yset==False:
if y1<y2:
self.ylo=y1
self.yhi=y2
else:
self.ylo=y2
self.yhi=y1
print('Set ylimits to',self.ylo,self.yhi)
self.yset=True
if self.zset==False:
if z1<z2:
self.zlo=z1
self.zhi=z2
else:
self.zlo=z2
self.zhi=z1
print('Set zlimits to',self.zlo,self.zhi)
self.zset=True
icnt=0
if self.yt_scene!=0:
for key, value in self.yt_scene.sources.items():
icnt=icnt+1
if icnt==0:
self.yt_def_vol()
# Coordinate transformation
if coords!='internal':
x1=(x1-self.xlo)/(self.xhi-self.xlo)
y1=(y1-self.ylo)/(self.yhi-self.ylo)
z1=(z1-self.zlo)/(self.zhi-self.zlo)
x2=(x2-self.xlo)/(self.xhi-self.xlo)
y2=(y2-self.ylo)/(self.yhi-self.ylo)
z2=(z2-self.zlo)/(self.zhi-self.zlo)
# Convert color to [r,g,b,a] for yt
from matplotlib.colors import to_rgba
colt=to_rgba(color)
colt2=[colt[0],colt[1],colt[2],colt[3]]
colors=[colt2]
        # Note that x1,y1,z1,x2,y2,z2 have already been rescaled to the
        # internal [0,1] coordinate system above, so they are used
        # directly here.
        vertices=numpy.array([[[x1,y1,z1],[x2,y2,z2]]])
        colors=numpy.array([colt2])
ls=LineSource(vertices,colors)
print('o2graph:yt-line: Adding line source.')
kname=self.yt_unique_keyname(keyname)
self.yt_scene.add_source(ls,keyname=kname)
# End of function plot_base::yt_line()
return
def yt_arrow(self,point1,point2,color=[1.0,1.0,1.0,0.5],n_lines=40,
frac_length=0.05,radius=0.0125,coords='user',
keyname='o2sclpy_arrow'):
"""
Plot an arrow in a yt volume visualization.
"""
from yt.visualization.volume_rendering.api \
import LineSource
x1=point1[0]
x2=point2[0]
y1=point1[1]
y2=point2[1]
z1=point1[2]
z2=point2[2]
if self.xset==False:
if x1<x2:
self.xlo=x1
self.xhi=x2
else:
self.xlo=x2
self.xhi=x1
print('Set xlimits to',self.xlo,self.xhi)
self.xset=True
if self.yset==False:
if y1<y2:
self.ylo=y1
self.yhi=y2
else:
self.ylo=y2
self.yhi=y1
print('Set ylimits to',self.ylo,self.yhi)
self.yset=True
if self.zset==False:
if z1<z2:
self.zlo=z1
self.zhi=z2
else:
self.zlo=z2
self.zhi=z1
print('Set zlimits to',self.zlo,self.zhi)
self.zset=True
icnt=0
if self.yt_scene!=0:
for key, value in self.yt_scene.sources.items():
icnt=icnt+1
if icnt==0:
self.yt_def_vol()
# Coordinate transformation
if coords!='internal':
x1=(x1-self.xlo)/(self.xhi-self.xlo)
y1=(y1-self.ylo)/(self.yhi-self.ylo)
z1=(z1-self.zlo)/(self.zhi-self.zlo)
x2=(x2-self.xlo)/(self.xhi-self.xlo)
y2=(y2-self.ylo)/(self.yhi-self.ylo)
z2=(z2-self.zlo)/(self.zhi-self.zlo)
# Arrow line
vertices=[[[x1,y1,z1],[x2,y2,z2]]]
from matplotlib.colors import to_rgba
colt=to_rgba(color)
colt2=[colt[0],colt[1],colt[2],colt[3]]
colors=[colt2]
# First convert the arrow to polar coordinates
rarr=math.sqrt((x2-x1)**2+(y2-y1)**2+(z2-z1)**2)
parr=math.atan2(y2-y1,x2-x1)
tarr=math.acos((z2-z1)/rarr)
# Arrow head
for theta in range(0,n_lines):
for z in range(1,2):
# Construct a vector from the tail of the arrow to the
# outer circle beneath the arrow head presuming the
# arrow is at (0,0,1)
vec=[radius*math.cos(theta/n_lines*2.0*math.pi),
radius*math.sin(theta/n_lines*2.0*math.pi),
1-frac_length]
# First transform by rotating the polar angle
                # (standard right-handed rotation matrix about the y axis)
                mat=numpy.array([[math.cos(tarr),0,math.sin(tarr)],
                                 [0,1,0],
                                 [-math.sin(tarr),0,math.cos(tarr)]])
                vec=mat.dot(vec)
                # Then transform by rotating the azimuthal angle
                # (standard right-handed rotation matrix about the z axis)
                mat=numpy.array([[math.cos(parr),-math.sin(parr),0],
                                 [math.sin(parr),math.cos(parr),0],
                                 [0,0,1]])
vec=mat.dot(vec)
# Rescale by the original vector length and translate
# to the tail of the vector
xnew=rarr*vec[0]+x1
ynew=rarr*vec[1]+y1
znew=rarr*vec[2]+z1
# Add the lines to the list for the LineSource
vertices.append([[x2,y2,z2],[xnew,ynew,znew]])
colors.append(colt2)
arrow_source=LineSource(numpy.array(vertices),numpy.array(colors))
kname=self.yt_unique_keyname(keyname)
self.yt_scene.add_source(arrow_source,keyname=kname)
# End of function plot_base::yt_arrow()
return
def yt_del_source(self,keyname):
"""
        Delete a yt source.
        o2sclpy has to keep track of the sources for two reasons: (i)
        to make sure volume sources refer to valid memory and (ii) to
be able to move text objects between renders in an animation.
Thus, this function is required to remove a source from both
the yt scene and from the internal o2sclpy lists.
"""
# Remove from the text objects list
        for i in range(0,len(self.yt_text_objects)):
            if self.yt_text_objects[i][0]==keyname:
                del self.yt_text_objects[i]
                break
        # Remove from the volume objects lists
        for i in range(0,len(self.yt_vol_keynames)):
            if self.yt_vol_keynames[i]==keyname:
                del self.yt_vol_keynames[i]
                del self.yt_volume_data[i]
                del self.yt_volume_bbox[i]
                del self.yt_vols[i]
                del self.yt_data_sources[i]
                break
# Now remove it from the scene
del self.yt_scene.sources[keyname]
# End of function plot_base::yt_del_source()
return
def yt_box(self,point1,point2,color=[1.0,1.0,1.0,0.5],
coords='user',keyname='o2sclpy_box'):
"""
Create a box in a yt visualization.
"""
from yt.visualization.volume_rendering.api \
import BoxSource
x1=point1[0]
x2=point2[0]
y1=point1[1]
y2=point2[1]
z1=point1[2]
z2=point2[2]
if self.xset==False:
if x1<x2:
self.xlo=x1
self.xhi=x2
else:
self.xlo=x2
self.xhi=x1
print('Set xlimits to',self.xlo,self.xhi)
self.xset=True
if self.yset==False:
if y1<y2:
self.ylo=y1
self.yhi=y2
else:
self.ylo=y2
self.yhi=y1
print('Set ylimits to',self.ylo,self.yhi)
self.yset=True
if self.zset==False:
if z1<z2:
self.zlo=z1
self.zhi=z2
else:
self.zlo=z2
self.zhi=z1
print('Set zlimits to',self.zlo,self.zhi)
self.zset=True
icnt=0
if self.yt_scene!=0:
for key, value in self.yt_scene.sources.items():
icnt=icnt+1
if icnt==0:
self.yt_def_vol()
# Coordinate transformation
if coords!='internal':
x1=(x1-self.xlo)/(self.xhi-self.xlo)
y1=(y1-self.ylo)/(self.yhi-self.ylo)
z1=(z1-self.zlo)/(self.zhi-self.zlo)
x2=(x2-self.xlo)/(self.xhi-self.xlo)
y2=(y2-self.ylo)/(self.yhi-self.ylo)
z2=(z2-self.zlo)/(self.zhi-self.zlo)
# Convert color to [r,g,b,a] for yt
from matplotlib.colors import to_rgba
colt=to_rgba(color)
colt2=[colt[0],colt[1],colt[2],colt[3]]
        colors=numpy.array([colt2])
        # Note that x1,y1,z1,x2,y2,z2 have already been rescaled to the
        # internal [0,1] coordinate system above, so they are used
        # directly here.
        left=numpy.array([x1,y1,z1])
        right=numpy.array([x2,y2,z2])
ls=BoxSource(left,right,colors)
print('o2graph:yt-box: Adding box source.')
kname=self.yt_unique_keyname(keyname)
self.yt_scene.add_source(ls,keyname=kname)
# End of function plot_base::yt_box()
return
def yt_text(self,tx,ty,tz,textstr,textcolor=(1,1,1,0.5),
reorient=False,scale=0.6,font=30,
keyname='o2sclpy_text',dpi=100,filename='',
coords=''):
"""
Plot text given in ``textstr`` in a yt volume visualization at
        location ``(tx,ty,tz)``. If ``reorient`` is ``True``, then,
        during an animation, the text will be redrawn so that
it is parallel to the camera. The ``scale`` and ``font``
parameters are passed on to the yt_text_to_scene() function.
In the future, the plan is to allow tx, ty, and tz to be
functions of 'i', so the text can be moved. For now tx, ty,
and tz are just floating point numbers.
"""
if (self.xset==False or self.yset==False or
self.zset==False):
print('Cannot place text before limits set.')
return
if coords!='internal':
xval=(tx-self.xlo)/(self.xhi-self.xlo)
yval=(ty-self.ylo)/(self.yhi-self.ylo)
zval=(tz-self.zlo)/(self.zhi-self.zlo)
else:
xval=tx
yval=ty
zval=tz
kname=self.yt_unique_keyname(keyname)
self.yt_text_objects.append([kname,reorient,xval,yval,zval,textstr,
textcolor,dpi,scale,font])
self.yt_text_to_scene([xval,yval,zval],textstr,scale=scale,
font=font,keyname=kname,filename=filename,
dpi=dpi,textcolor=textcolor)
# End of function plot_base::yt_text()
return
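    # Minimal sketch (assumes the x, y, and z limits have already been
    # set; the object name ``pb`` and the values are hypothetical):
    # place a label at the user coordinates (1.0,2.0,3.0),
    #
    #   pb.yt_text(1.0,2.0,3.0,'source A',textcolor=(1,1,1,0.7))
    #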
def yt_unique_keyname(self,prefix):
"""
Construct a unique yt keyname by adding integers (beginning with
the number 2) to the user-specified ``prefix``.
"""
if self.yt_scene==0:
return(prefix)
current=prefix
unique=False
count=1
while unique==False:
unique=True
if count>1:
current=prefix+str(count)
for key, value in self.yt_scene.sources.items():
if key==current:
unique=False
if unique==False:
count=count+1
if self.verbose>0 and count>1:
print('Key name',prefix,'changed to unique name',current)
# End of function plot_base::yt_unique_keyname()
return(current)
def yt_create_scene(self):
"""
Create the yt scene object and set yt_created_scene to True.
"""
from yt.visualization.volume_rendering.api import Scene
print('plot_base:yt_create_scene(): Creating scene.')
self.yt_scene=Scene()
self.yt_created_scene=True
# End of function plot_base::yt_create_scene()
return
def yt_create_camera(self,ds):
"""
Create the yt camera object using the class variables
``yt_resolution``, ``yt_position``, and ``yt_focus``, with a
camera width based on the domain width of ``ds``.
"""
if (self.xset==False or self.yset==False or
self.zset==False):
print('Cannot create camera before x, y, and z limits are set.')
return
print('plot_base:yt_create_camera(): Creating camera.')
self.yt_camera=self.yt_scene.add_camera()
self.yt_camera.resolution=self.yt_resolution
if self.yt_width=='default':
self.yt_camera.width=1.5*ds.domain_width[0]
else:
self.yt_camera.width=[eval(self.yt_width)[0],
eval(self.yt_width)[1],
eval(self.yt_width)[2]]
print('Camera width [%0.6e,%0.6e,%0.6e]' %
(self.yt_camera.width[0],
self.yt_camera.width[1],
self.yt_camera.width[2]))
if self.yt_position=='default':
self.yt_camera.position=[1.5,0.6,0.7]
else:
self.yt_camera.position=[(eval(self.yt_position)[0]-self.xlo)/
(self.xhi-self.xlo),
(eval(self.yt_position)[1]-self.ylo)/
(self.yhi-self.ylo),
(eval(self.yt_position)[2]-self.zlo)/
(self.zhi-self.zlo)]
print('Camera position [%0.6e,%0.6e,%0.6e]' %
(self.yt_camera.position[0],
self.yt_camera.position[1],
self.yt_camera.position[2]))
if self.yt_focus=='default':
self.yt_camera.focus=[0.5,0.5,0.5]
else:
self.yt_camera.focus=[(eval(self.yt_focus)[0]-self.xlo)/
(self.xhi-self.xlo),
(eval(self.yt_focus)[1]-self.ylo)/
(self.yhi-self.ylo),
(eval(self.yt_focus)[2]-self.zlo)/
(self.zhi-self.zlo)]
print('Camera focus [%0.6e,%0.6e,%0.6e]' %
(self.yt_camera.focus[0],
self.yt_camera.focus[1],
self.yt_camera.focus[2]))
self.yt_camera.north_vector=[0.0,0.0,1.0]
self.yt_camera.switch_orientation()
self.yt_created_camera=True
# End of function plot_base::yt_create_camera()
return
def yt_text_to_points(self,veco,vecx,vecy,text,dpi=100,font=30,
textcolor=(1,1,1,0.5),show=False,filename=''):
"""
Take three 3D vectors 'veco' (origin), 'vecx' (x direction) and
'vecy' (y direction), and a string of text ('text'), and
        return two numpy arrays: one of shape (npoints,3) containing the
        point positions (x,y,z) and one of shape (npoints,4) containing
        the colors (r,g,b,a). The values r, g, and b are between 0 and 1,
        and the alpha value of 'textcolor' is used for the alpha value
        of all points.
Generally, to increase the point resolution of the text
rendering, you increase the dpi parameter by some factor
and decrease the scale factor by the same amount. However,
be careful because increasing the number of points
will slow down the yt rendering considerably.
Note that this function presumes a black background so it
cannot handle black text.
Using the default dpi and font size is usually sufficient for
lines of text containing about 30 characters. If more
characters are required, then font must be decreased and dpi
must be increased by the same factor in order to ensure all
characters fit in the temporary figure which this function
generates.
"""
import matplotlib.pyplot as plot
plot.rc('text',usetex=True)
fig=plot.figure(1,figsize=(6.4,4.8),dpi=dpi)
axes=plot.axes([0,0,1,1])
fig.set_facecolor((0,0,0))
axes.set_facecolor((0,0,0))
from matplotlib.colors import to_rgba
alpha=to_rgba(textcolor)[3]
axes.text(0.5,0.5,text,fontsize=font,ha='center',va='center',
color=textcolor)
fig.canvas.draw()
if filename!='':
print("Saving render of text '"+text+
"' in file named "+filename+'.')
plot.savefig(filename)
if show:
plot.show()
X=numpy.array(fig.canvas.renderer._renderer)
Y=[]
Y2=[]
# Note that the array is flipped, so ymax is obtained
# from the width and xmax is obtained from the height
ymax=int(fig.get_dpi()*fig.get_figwidth())
xmax=int(fig.get_dpi()*fig.get_figheight())
for i in range(0,xmax):
for j in range(0,ymax):
if X[i,j,0]!=0 or X[i,j,1]!=0 or X[i,j,2]!=0:
xold=2.0*(i-float(xmax)/2)/float(xmax)
yold=2.0*(j-float(ymax)/2)/float(ymax)
vecnew=[veco[0]-vecy[0]*xold+vecx[0]*yold,
veco[1]-vecy[1]*xold+vecx[1]*yold,
veco[2]-vecy[2]*xold+vecx[2]*yold]
Y.append([vecnew[0],vecnew[1],vecnew[2]])
Y2.append([X[i,j,0]/255.0,X[i,j,1]/255.0,
X[i,j,2]/255.0,alpha])
print('plot_base.yt_text_to_points():\n\t',
'Number of points for',text,'is',len(Y))
# Close the figure so that the memory is released now
# that we have the point data
plot.close(fig)
# End of function plot_base::yt_text_to_points()
return(numpy.array(Y),numpy.array(Y2))
def yt_text_to_scene(self,loc,text,textcolor=(1,1,1,0.5),scale=0.6,
dpi=100,font=30,keyname='o2sclpy_text',
filename=''):
"""
At location 'loc' put text 'text' into the scene using specified
scale parameter and keyname. This function uses the current yt
camera to orient the text so that it is upright and parallel
        to the camera. Increasing 'scale' increases the size of the
text and the 'font' parameter is passed on to the
yt_text_to_points() function.
Generally, to increase the point resolution of the text
rendering, you increase the dpi parameter by some factor
and decrease the scale factor by the same amount. However,
be careful because increasing the number of points
will slow down the yt rendering considerably.
Note that this function presumes a black background so it
cannot handle black text.
"""
# Imports
from yt.visualization.volume_rendering.api \
import PointSource
# Construct orientation vectors. We arrange the text to be
# upright and parallel to the camera.
view_y=self.yt_camera.north_vector
view_x=-numpy.cross(view_y,self.yt_camera.focus-
self.yt_camera.position)
# Normalize view_x and view_y
view_x=view_x/numpy.sqrt(view_x[0]**2+view_x[1]**2+view_x[2]**2)
view_y=view_y/numpy.sqrt(view_y[0]**2+view_y[1]**2+view_y[2]**2)
# Choose scale. The extra factor of 0.8 for y seems to be required
# to make the text look correctly scaled.
view_x=view_x*scale
view_y=view_y*scale*0.8
# Convert text to points
(Y,Y2)=self.yt_text_to_points(loc,view_x,view_y,text,
textcolor=textcolor,font=font,
dpi=dpi,filename=filename)
# Add the point source from the arrays returned by
# the yt_text_to_points() function.
points_xalabels=PointSource(Y,colors=Y2)
kname=self.yt_unique_keyname(keyname)
self.yt_scene.add_source(points_xalabels,keyname=kname)
# End of function plot_base::yt_text_to_scene()
return
def yt_plot_axis(self,xval=1.0,yval=1.0,zval=1.0,
color=[1.0,1.0,1.0,0.5],
coords='internal',keyname='o2sclpy_axis'):
"""
Plot an axis in a yt volume consisting a PointSource for the
origin and then three arrows pointing from ``origin`` to
        ``[xval,0,0]``, ``[0,yval,0]``, and ``[0,0,zval]``. The
specified color is used for the origin and all three arrows.
The arrows are constructed with one main LineSource and then
several smaller LineSource objects in a conical shape to
create the arrow heads.
"""
if self.yt_scene==0:
print('Cannot plot yt axis without a scene.')
return
print('plot_base:yt_plot_axis(): Adding axis.')
# Imports
from yt.visualization.volume_rendering.api \
import PointSource, LineSource
origin=[0,0,0]
ihat=[xval,0,0]
jhat=[0,yval,0]
khat=[0,0,zval]
# Convert color to [r,g,b,a] for yt
from matplotlib.colors import to_rgba
colt=to_rgba(color)
colt2=[colt[0],colt[1],colt[2],colt[3]]
colors=[colt2]
# Point at origin
vertex_origin=numpy.array([origin])
color_origin=numpy.array([colt2])
points=PointSource(vertex_origin,colors=color_origin,radii=3)
kname=self.yt_unique_keyname(keyname+'_o')
self.yt_scene.add_source(points,keyname=kname)
self.yt_arrow(origin,ihat,color=color,keyname=keyname+'_x',
coords=coords)
self.yt_arrow(origin,jhat,color=color,keyname=keyname+'_y',
coords=coords)
self.yt_arrow(origin,khat,color=color,keyname=keyname+'_z',
coords=coords)
# End of function plot_base::yt_plot_axis()
return
def yt_check_backend(self):
"""
For yt, check that we're using the Agg backend, and
print out an error message if we are not.
"""
import matplotlib
if (matplotlib.get_backend()!='Agg' and
matplotlib.get_backend()!='agg'):
print('yt integration only works with Agg.')
print('Current backend is',matplotlib.get_backend())
return 1
# End of function plot_base::yt_check_backend()
return 0
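    # A typical way to satisfy this check (an assumption about the
    # calling script, not part of this class) is to select the backend
    # before pyplot is imported:
    #
    #   import matplotlib
    #   matplotlib.use('Agg')
    #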
def yt_def_vol(self):
"""
Create a default yt volume source for rendering other objects
"""
import yt
from yt.visualization.volume_rendering.api \
import create_volume_source
if self.verbose>0:
print('No volume object, adding yt volume.')
self.yt_tf=yt.ColorTransferFunction((0,1),grey_opacity=False)
self.yt_tf.add_gaussian(2.0,0.1,[0,0,0,0])
arr=numpy.zeros(shape=(2,2,2))
bbox=numpy.array([[0.0,1.0],[0.0,1.0],[0.0,1.0]])
self.yt_data_sources.append(yt.load_uniform_grid(dict(density=arr),
arr.shape,bbox=bbox))
ds=self.yt_data_sources[len(self.yt_data_sources)-1]
self.yt_vols.append(create_volume_source(ds,field='density'))
vol=self.yt_vols[len(self.yt_vols)-1]
vol.log_field=False
vol.set_transfer_function(self.yt_tf)
if self.yt_created_scene==False:
self.yt_create_scene()
kname=self.yt_unique_keyname('o2sclpy_vol')
self.yt_scene.add_source(vol,keyname=kname)
if self.yt_created_camera==False:
self.yt_create_camera(ds)
# End of function plot_base::yt_def_vol()
return
def text2(self,tx,ty,textstr,**kwargs):
"""
        A wrapper for plot_base::text() which ensures that the proper
        yt transformation is applied when a yt scene is present.
"""
# If we're doing a yt text annotation, then add the proper
# transformation
if self.yt_scene!=0:
kwargs=dict(kwargs,transform=self.yt_trans)
self.text(tx,ty,textstr,**kwargs)
return
| gpl-3.0 |
judithyueli/pyFKF | book_format.py | 2 | 3407 | # -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from contextlib import contextmanager
from IPython.core.display import HTML
import json
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
import os.path
import sys
sys.path.insert(0, './code') # allow us to import book_format
def test_filterpy_version():
import filterpy
min_version = [0,0,25]
v = filterpy.__version__
tokens = v.split('.')
for i,v in enumerate(tokens):
if int(v) > min_version[i]:
return
i = len(tokens) - 1
if min_version[i] > int(tokens[i]):
raise Exception("Minimum FilterPy version supported is {}.{}.{}.\n"
"Please install a more recent version.\n"
" ex: pip install filterpy --upgrade".format(
*min_version))
v = int(tokens[0]) * 1000
# ensure that we have the correct filterpy loaded. This is
# called when this module is imported at the top of each book
# chapter so the reader can see that they need to update FilterPy.
test_filterpy_version()
def equal_axis():
pylab.rcParams['figure.figsize'] = 10,10
plt.axis('equal')
def reset_axis():
pylab.rcParams['figure.figsize'] = 11, 4
def set_figsize(x=11, y=4):
pylab.rcParams['figure.figsize'] = x, y
@contextmanager
def figsize(x=11, y=4):
"""Temporarily set the figure size using 'with figsize(a,b):'"""
size = pylab.rcParams['figure.figsize']
set_figsize(x, y)
yield
pylab.rcParams['figure.figsize'] = size
@contextmanager
def numpy_precision(precision):
old = np.get_printoptions()['precision']
np.set_printoptions(precision=precision)
yield
np.set_printoptions(old)
@contextmanager
def printoptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
yield
np.set_printoptions(**original)
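# Example use of the context manager above (illustrative only):
#
#   with printoptions(precision=3, suppress=True):
#       print(np.array([1.2345678, 6.789e-10]))
#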
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def load_style(directory = '.', name='styles/custom.css'):
if sys.version_info[0] >= 3:
s = json.load(open(os.path.join(directory, "styles/538.json")))
else:
s = json.load(open(directory + "/styles/538.json"), object_hook=_decode_dict)
plt.rcParams.update(s)
reset_axis ()
np.set_printoptions(suppress=True)
styles = open(os.path.join(directory, name), 'r').read()
return HTML(styles)
| mit |
ahoyosid/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/preprocessing/tests/test_encoders.py | 6 | 34784 | # -*- coding: utf-8 -*-
import re
import numpy as np
from scipy import sparse
import pytest
from sklearn.exceptions import NotFittedError
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import _convert_container
from sklearn.utils import is_scalar_nan
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
def test_one_hot_encoder_sparse_dense():
# check that sparse and dense will give the same results
X = np.array([[3, 2, 1], [0, 1, 1]])
enc_sparse = OneHotEncoder()
enc_dense = OneHotEncoder(sparse=False)
X_trans_sparse = enc_sparse.fit_transform(X)
X_trans_dense = enc_dense.fit_transform(X)
assert X_trans_sparse.shape == (2, 5)
assert X_trans_dense.shape == (2, 5)
assert sparse.issparse(X_trans_sparse)
assert not sparse.issparse(X_trans_dense)
# check outcome
assert_array_equal(X_trans_sparse.toarray(), [[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
assert_array_equal(X_trans_sparse.toarray(), X_trans_dense)
def test_one_hot_encoder_diff_n_features():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
X2 = np.array([[1, 0]])
enc = OneHotEncoder()
enc.fit(X)
err_msg = ("The number of features in X is different to the number of "
"features of the fitted data.")
with pytest.raises(ValueError, match=err_msg):
enc.transform(X2)
def test_one_hot_encoder_handle_unknown():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
X2 = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
with pytest.raises(ValueError, match='Found unknown categories'):
oh.transform(X2)
# Test the ignore option, ignores unknown features (giving all 0's)
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
X2_passed = X2.copy()
assert_array_equal(
oh.transform(X2_passed).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
# ensure transformed data was not modified in place
assert_allclose(X2, X2_passed)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
with pytest.raises(ValueError, match='handle_unknown should be either'):
oh.fit(X)
def test_one_hot_encoder_not_fitted():
X = np.array([['a'], ['b']])
enc = OneHotEncoder(categories=['a', 'b'])
msg = ("This OneHotEncoder instance is not fitted yet. "
"Call 'fit' with appropriate arguments before using this "
"estimator.")
with pytest.raises(NotFittedError, match=msg):
enc.transform(X)
def test_one_hot_encoder_handle_unknown_strings():
X = np.array(['11111111', '22', '333', '4444']).reshape((-1, 1))
X2 = np.array(['55555', '22']).reshape((-1, 1))
# Non Regression test for the issue #12470
# Test the ignore option, when categories are numpy string dtype
# particularly when the known category strings are larger
# than the unknown category strings
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
X2_passed = X2.copy()
assert_array_equal(
oh.transform(X2_passed).toarray(),
np.array([[0., 0., 0., 0.], [0., 1., 0., 0.]]))
# ensure transformed data was not modified in place
assert_array_equal(X2, X2_passed)
@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64])
@pytest.mark.parametrize("input_dtype", [np.int32, np.float32, np.float64])
def test_one_hot_encoder_dtype(input_dtype, output_dtype):
X = np.asarray([[0, 1]], dtype=input_dtype).T
X_expected = np.asarray([[1, 0], [0, 1]], dtype=output_dtype)
oh = OneHotEncoder(categories='auto', dtype=output_dtype)
assert_array_equal(oh.fit_transform(X).toarray(), X_expected)
assert_array_equal(oh.fit(X).transform(X).toarray(), X_expected)
oh = OneHotEncoder(categories='auto', dtype=output_dtype, sparse=False)
assert_array_equal(oh.fit_transform(X), X_expected)
assert_array_equal(oh.fit(X).transform(X), X_expected)
@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64])
def test_one_hot_encoder_dtype_pandas(output_dtype):
pd = pytest.importorskip('pandas')
X_df = pd.DataFrame({'A': ['a', 'b'], 'B': [1, 2]})
X_expected = np.array([[1, 0, 1, 0], [0, 1, 0, 1]], dtype=output_dtype)
oh = OneHotEncoder(dtype=output_dtype)
assert_array_equal(oh.fit_transform(X_df).toarray(), X_expected)
assert_array_equal(oh.fit(X_df).transform(X_df).toarray(), X_expected)
oh = OneHotEncoder(dtype=output_dtype, sparse=False)
assert_array_equal(oh.fit_transform(X_df), X_expected)
assert_array_equal(oh.fit(X_df).transform(X_df), X_expected)
def test_one_hot_encoder_feature_names():
enc = OneHotEncoder()
X = [['Male', 1, 'girl', 2, 3],
['Female', 41, 'girl', 1, 10],
['Male', 51, 'boy', 12, 3],
['Male', 91, 'girl', 21, 30]]
enc.fit(X)
feature_names = enc.get_feature_names()
assert isinstance(feature_names, np.ndarray)
assert_array_equal(['x0_Female', 'x0_Male',
'x1_1', 'x1_41', 'x1_51', 'x1_91',
'x2_boy', 'x2_girl',
'x3_1', 'x3_2', 'x3_12', 'x3_21',
'x4_3',
'x4_10', 'x4_30'], feature_names)
feature_names2 = enc.get_feature_names(['one', 'two',
'three', 'four', 'five'])
assert_array_equal(['one_Female', 'one_Male',
'two_1', 'two_41', 'two_51', 'two_91',
'three_boy', 'three_girl',
'four_1', 'four_2', 'four_12', 'four_21',
'five_3', 'five_10', 'five_30'], feature_names2)
with pytest.raises(ValueError, match="input_features should have length"):
enc.get_feature_names(['one', 'two'])
def test_one_hot_encoder_feature_names_unicode():
enc = OneHotEncoder()
X = np.array([['c❤t1', 'dat2']], dtype=object).T
enc.fit(X)
feature_names = enc.get_feature_names()
assert_array_equal(['x0_c❤t1', 'x0_dat2'], feature_names)
feature_names = enc.get_feature_names(input_features=['n👍me'])
assert_array_equal(['n👍me_c❤t1', 'n👍me_dat2'], feature_names)
def test_one_hot_encoder_set_params():
X = np.array([[1, 2]]).T
oh = OneHotEncoder()
# set params on not yet fitted object
oh.set_params(categories=[[0, 1, 2, 3]])
assert oh.get_params()['categories'] == [[0, 1, 2, 3]]
assert oh.fit_transform(X).toarray().shape == (2, 4)
# set params on already fitted object
oh.set_params(categories=[[0, 1, 2, 3, 4]])
assert oh.fit_transform(X).toarray().shape == (2, 5)
def check_categorical_onehot(X):
enc = OneHotEncoder(categories='auto')
Xtr1 = enc.fit_transform(X)
enc = OneHotEncoder(categories='auto', sparse=False)
Xtr2 = enc.fit_transform(X)
assert_allclose(Xtr1.toarray(), Xtr2)
assert sparse.isspmatrix_csr(Xtr1)
return Xtr1.toarray()
@pytest.mark.parametrize("X", [
[['def', 1, 55], ['abc', 2, 55]],
np.array([[10, 1, 55], [5, 2, 55]]),
np.array([['b', 'A', 'cat'], ['a', 'B', 'cat']], dtype=object),
np.array([['b', 1, 'cat'], ['a', np.nan, 'cat']], dtype=object),
np.array([['b', 1, 'cat'], ['a', float('nan'), 'cat']], dtype=object),
np.array([[None, 1, 'cat'], ['a', 2, 'cat']], dtype=object),
np.array([[None, 1, None], ['a', np.nan, None]], dtype=object),
np.array([[None, 1, None], ['a', float('nan'), None]], dtype=object),
], ids=['mixed', 'numeric', 'object', 'mixed-nan', 'mixed-float-nan',
'mixed-None', 'mixed-None-nan', 'mixed-None-float-nan'])
def test_one_hot_encoder(X):
Xtr = check_categorical_onehot(np.array(X)[:, [0]])
assert_allclose(Xtr, [[0, 1], [1, 0]])
Xtr = check_categorical_onehot(np.array(X)[:, [0, 1]])
assert_allclose(Xtr, [[0, 1, 1, 0], [1, 0, 0, 1]])
Xtr = OneHotEncoder(categories='auto').fit_transform(X)
assert_allclose(Xtr.toarray(), [[0, 1, 1, 0, 1], [1, 0, 0, 1, 1]])
@pytest.mark.parametrize('sparse_', [False, True])
@pytest.mark.parametrize('drop', [None, 'first'])
def test_one_hot_encoder_inverse(sparse_, drop):
X = [['abc', 2, 55], ['def', 1, 55], ['abc', 3, 55]]
enc = OneHotEncoder(sparse=sparse_, drop=drop)
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
assert_array_equal(enc.inverse_transform(X_tr), exp)
X = [[2, 55], [1, 55], [3, 55]]
enc = OneHotEncoder(sparse=sparse_, categories='auto',
drop=drop)
X_tr = enc.fit_transform(X)
exp = np.array(X)
assert_array_equal(enc.inverse_transform(X_tr), exp)
if drop is None:
# with unknown categories
# drop is incompatible with handle_unknown=ignore
X = [['abc', 2, 55], ['def', 1, 55], ['abc', 3, 55]]
enc = OneHotEncoder(sparse=sparse_, handle_unknown='ignore',
categories=[['abc', 'def'], [1, 2],
[54, 55, 56]])
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
exp[2, 1] = None
assert_array_equal(enc.inverse_transform(X_tr), exp)
# with an otherwise numerical output, still object if unknown
X = [[2, 55], [1, 55], [3, 55]]
enc = OneHotEncoder(sparse=sparse_, categories=[[1, 2], [54, 56]],
handle_unknown='ignore')
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
exp[2, 0] = None
exp[:, 1] = None
assert_array_equal(enc.inverse_transform(X_tr), exp)
# incorrect shape raises
X_tr = np.array([[0, 1, 1], [1, 0, 1]])
msg = re.escape('Shape of the passed X data is not correct')
with pytest.raises(ValueError, match=msg):
enc.inverse_transform(X_tr)
@pytest.mark.parametrize('sparse_', [False, True])
@pytest.mark.parametrize(
"X, X_trans",
[
([[2, 55], [1, 55], [2, 55]], [[0, 1, 1], [0, 0, 0], [0, 1, 1]]),
([['one', 'a'], ['two', 'a'], ['three', 'b'], ['two', 'a']],
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0]]),
]
)
def test_one_hot_encoder_inverse_transform_raise_error_with_unknown(
X, X_trans, sparse_
):
"""Check that `inverse_transform` raise an error with unknown samples, no
dropped feature, and `handle_unknow="error`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/14934
"""
enc = OneHotEncoder(sparse=sparse_).fit(X)
msg = (
r"Samples \[(\d )*\d\] can not be inverted when drop=None and "
r"handle_unknown='error' because they contain all zeros"
)
if sparse_:
# emulate sparse data transform by a one-hot encoder sparse.
X_trans = _convert_container(X_trans, "sparse")
with pytest.raises(ValueError, match=msg):
enc.inverse_transform(X_trans)
def test_one_hot_encoder_inverse_if_binary():
X = np.array([['Male', 1],
['Female', 3],
['Female', 2]], dtype=object)
ohe = OneHotEncoder(drop='if_binary', sparse=False)
X_tr = ohe.fit_transform(X)
assert_array_equal(ohe.inverse_transform(X_tr), X)
# check that resetting drop option without refitting does not throw an error
@pytest.mark.parametrize('drop', ['if_binary', 'first', None])
@pytest.mark.parametrize('reset_drop', ['if_binary', 'first', None])
def test_one_hot_encoder_drop_reset(drop, reset_drop):
X = np.array([['Male', 1],
['Female', 3],
['Female', 2]], dtype=object)
ohe = OneHotEncoder(drop=drop, sparse=False)
ohe.fit(X)
X_tr = ohe.transform(X)
feature_names = ohe.get_feature_names()
ohe.set_params(drop=reset_drop)
assert_array_equal(ohe.inverse_transform(X_tr), X)
assert_allclose(ohe.transform(X), X_tr)
assert_array_equal(ohe.get_feature_names(), feature_names)
@pytest.mark.parametrize("method", ['fit', 'fit_transform'])
@pytest.mark.parametrize("X", [
[1, 2],
np.array([3., 4.])
])
def test_X_is_not_1D(X, method):
oh = OneHotEncoder()
msg = ("Expected 2D array, got 1D array instead")
with pytest.raises(ValueError, match=msg):
getattr(oh, method)(X)
@pytest.mark.parametrize("method", ['fit', 'fit_transform'])
def test_X_is_not_1D_pandas(method):
pd = pytest.importorskip('pandas')
X = pd.Series([6, 3, 4, 6])
oh = OneHotEncoder()
msg = ("Expected 2D array, got 1D array instead")
with pytest.raises(ValueError, match=msg):
getattr(oh, method)(X)
@pytest.mark.parametrize("X, cat_exp, cat_dtype", [
([['abc', 55], ['def', 55]], [['abc', 'def'], [55]], np.object_),
(np.array([[1, 2], [3, 2]]), [[1, 3], [2]], np.integer),
(np.array([['A', 'cat'], ['B', 'cat']], dtype=object),
[['A', 'B'], ['cat']], np.object_),
(np.array([['A', 'cat'], ['B', 'cat']]),
[['A', 'B'], ['cat']], np.str_),
(np.array([[1, 2], [np.nan, 2]]), [[1, np.nan], [2]], np.float_),
(np.array([['A', np.nan], [None, np.nan]], dtype=object),
[['A', None], [np.nan]], np.object_),
(np.array([['A', float('nan')], [None, float('nan')]], dtype=object),
[['A', None], [float('nan')]], np.object_),
], ids=['mixed', 'numeric', 'object', 'string', 'missing-float',
'missing-np.nan-object', 'missing-float-nan-object'])
def test_one_hot_encoder_categories(X, cat_exp, cat_dtype):
# order of categories should not depend on order of samples
for Xi in [X, X[::-1]]:
enc = OneHotEncoder(categories='auto')
enc.fit(Xi)
# assert enc.categories == 'auto'
assert isinstance(enc.categories_, list)
for res, exp in zip(enc.categories_, cat_exp):
res_list = res.tolist()
if is_scalar_nan(exp[-1]):
assert is_scalar_nan(res_list[-1])
assert res_list[:-1] == exp[:-1]
else:
assert res.tolist() == exp
assert np.issubdtype(res.dtype, cat_dtype)
@pytest.mark.parametrize("X, X2, cats, cat_dtype", [
(np.array([['a', 'b']], dtype=object).T,
np.array([['a', 'd']], dtype=object).T,
[['a', 'b', 'c']], np.object_),
(np.array([[1, 2]], dtype='int64').T,
np.array([[1, 4]], dtype='int64').T,
[[1, 2, 3]], np.int64),
(np.array([['a', 'b']], dtype=object).T,
np.array([['a', 'd']], dtype=object).T,
[np.array(['a', 'b', 'c'])], np.object_),
(np.array([[None, 'a']], dtype=object).T,
np.array([[None, 'b']], dtype=object).T,
[[None, 'a', 'z']], object),
(np.array([['a', 'b']], dtype=object).T,
np.array([['a', np.nan]], dtype=object).T,
[['a', 'b', 'z']], object),
(np.array([['a', None]], dtype=object).T,
np.array([['a', np.nan]], dtype=object).T,
[['a', None, 'z']], object),
(np.array([['a', np.nan]], dtype=object).T,
np.array([['a', None]], dtype=object).T,
[['a', np.nan, 'z']], object),
], ids=['object', 'numeric', 'object-string',
'object-string-none', 'object-string-nan',
'object-None-and-nan', 'object-nan-and-None'])
def test_one_hot_encoder_specified_categories(X, X2, cats, cat_dtype):
enc = OneHotEncoder(categories=cats)
exp = np.array([[1., 0., 0.],
[0., 1., 0.]])
assert_array_equal(enc.fit_transform(X).toarray(), exp)
assert list(enc.categories[0]) == list(cats[0])
assert enc.categories_[0].tolist() == list(cats[0])
# manually specified categories should have same dtype as
# the data when coerced from lists
assert enc.categories_[0].dtype == cat_dtype
# when specifying categories manually, unknown categories should already
# raise when fitting
enc = OneHotEncoder(categories=cats)
with pytest.raises(ValueError, match="Found unknown categories"):
enc.fit(X2)
enc = OneHotEncoder(categories=cats, handle_unknown='ignore')
exp = np.array([[1., 0., 0.], [0., 0., 0.]])
assert_array_equal(enc.fit(X2).transform(X2).toarray(), exp)
def test_one_hot_encoder_unsorted_categories():
X = np.array([['a', 'b']], dtype=object).T
enc = OneHotEncoder(categories=[['b', 'a', 'c']])
exp = np.array([[0., 1., 0.],
[1., 0., 0.]])
assert_array_equal(enc.fit(X).transform(X).toarray(), exp)
assert_array_equal(enc.fit_transform(X).toarray(), exp)
assert enc.categories_[0].tolist() == ['b', 'a', 'c']
assert np.issubdtype(enc.categories_[0].dtype, np.object_)
    # passing unsorted categories still raises an error for numerical values
X = np.array([[1, 2]]).T
enc = OneHotEncoder(categories=[[2, 1, 3]])
msg = 'Unsorted categories are not supported'
with pytest.raises(ValueError, match=msg):
enc.fit_transform(X)
# np.nan must be the last category in categories[0] to be considered sorted
X = np.array([[1, 2, np.nan]]).T
enc = OneHotEncoder(categories=[[1, np.nan, 2]])
with pytest.raises(ValueError, match=msg):
enc.fit_transform(X)
def test_one_hot_encoder_specified_categories_mixed_columns():
# multiple columns
X = np.array([['a', 'b'], [0, 2]], dtype=object).T
enc = OneHotEncoder(categories=[['a', 'b', 'c'], [0, 1, 2]])
exp = np.array([[1., 0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0., 1.]])
assert_array_equal(enc.fit_transform(X).toarray(), exp)
assert enc.categories_[0].tolist() == ['a', 'b', 'c']
assert np.issubdtype(enc.categories_[0].dtype, np.object_)
assert enc.categories_[1].tolist() == [0, 1, 2]
# integer categories but from object dtype data
assert np.issubdtype(enc.categories_[1].dtype, np.object_)
def test_one_hot_encoder_pandas():
pd = pytest.importorskip('pandas')
X_df = pd.DataFrame({'A': ['a', 'b'], 'B': [1, 2]})
Xtr = check_categorical_onehot(X_df)
assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]])
@pytest.mark.parametrize("drop, expected_names",
[('first', ['x0_c', 'x2_b']),
('if_binary', ['x0_c', 'x1_2', 'x2_b']),
(['c', 2, 'b'], ['x0_b', 'x2_a'])],
ids=['first', 'binary', 'manual'])
def test_one_hot_encoder_feature_names_drop(drop, expected_names):
X = [['c', 2, 'a'],
['b', 2, 'b']]
ohe = OneHotEncoder(drop=drop)
ohe.fit(X)
feature_names = ohe.get_feature_names()
assert isinstance(feature_names, np.ndarray)
assert_array_equal(expected_names, feature_names)
def test_one_hot_encoder_drop_equals_if_binary():
# Canonical case
X = [[10, 'yes'],
[20, 'no'],
[30, 'yes']]
expected = np.array([[1., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 0., 1., 1.]])
expected_drop_idx = np.array([None, 0])
ohe = OneHotEncoder(drop='if_binary', sparse=False)
result = ohe.fit_transform(X)
assert_array_equal(ohe.drop_idx_, expected_drop_idx)
assert_allclose(result, expected)
    # with only one category, the behaviour is equivalent to drop=None
X = [['true', 'a'],
['false', 'a'],
['false', 'a']]
expected = np.array([[1., 1.],
[0., 1.],
[0., 1.]])
expected_drop_idx = np.array([0, None])
ohe = OneHotEncoder(drop='if_binary', sparse=False)
result = ohe.fit_transform(X)
assert_array_equal(ohe.drop_idx_, expected_drop_idx)
assert_allclose(result, expected)
@pytest.mark.parametrize("X", [
[['abc', 2, 55], ['def', 1, 55]],
np.array([[10, 2, 55], [20, 1, 55]]),
np.array([['a', 'B', 'cat'], ['b', 'A', 'cat']], dtype=object)
], ids=['mixed', 'numeric', 'object'])
def test_ordinal_encoder(X):
enc = OrdinalEncoder()
exp = np.array([[0, 1, 0],
[1, 0, 0]], dtype='int64')
assert_array_equal(enc.fit_transform(X), exp.astype('float64'))
enc = OrdinalEncoder(dtype='int64')
assert_array_equal(enc.fit_transform(X), exp)
@pytest.mark.parametrize("X, X2, cats, cat_dtype", [
(np.array([['a', 'b']], dtype=object).T,
np.array([['a', 'd']], dtype=object).T,
[['a', 'b', 'c']], np.object_),
(np.array([[1, 2]], dtype='int64').T,
np.array([[1, 4]], dtype='int64').T,
[[1, 2, 3]], np.int64),
(np.array([['a', 'b']], dtype=object).T,
np.array([['a', 'd']], dtype=object).T,
[np.array(['a', 'b', 'c'])], np.object_),
], ids=['object', 'numeric', 'object-string-cat'])
def test_ordinal_encoder_specified_categories(X, X2, cats, cat_dtype):
enc = OrdinalEncoder(categories=cats)
exp = np.array([[0.], [1.]])
assert_array_equal(enc.fit_transform(X), exp)
assert list(enc.categories[0]) == list(cats[0])
assert enc.categories_[0].tolist() == list(cats[0])
# manually specified categories should have same dtype as
# the data when coerced from lists
assert enc.categories_[0].dtype == cat_dtype
# when specifying categories manually, unknown categories should already
# raise when fitting
enc = OrdinalEncoder(categories=cats)
with pytest.raises(ValueError, match="Found unknown categories"):
enc.fit(X2)
def test_ordinal_encoder_inverse():
X = [['abc', 2, 55], ['def', 1, 55]]
enc = OrdinalEncoder()
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
assert_array_equal(enc.inverse_transform(X_tr), exp)
# incorrect shape raises
X_tr = np.array([[0, 1, 1, 2], [1, 0, 1, 0]])
msg = re.escape('Shape of the passed X data is not correct')
with pytest.raises(ValueError, match=msg):
enc.inverse_transform(X_tr)
@pytest.mark.parametrize("X", [np.array([[1, np.nan]]).T,
np.array([['a', np.nan]], dtype=object).T],
ids=['numeric', 'object'])
def test_ordinal_encoder_raise_missing(X):
ohe = OrdinalEncoder()
with pytest.raises(ValueError, match="Input contains NaN"):
ohe.fit(X)
with pytest.raises(ValueError, match="Input contains NaN"):
ohe.fit_transform(X)
ohe.fit(X[:1, :])
with pytest.raises(ValueError, match="Input contains NaN"):
ohe.transform(X)
def test_ordinal_encoder_handle_unknowns_string():
enc = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-2)
X_fit = np.array([['a', 'x'], ['b', 'y'], ['c', 'z']], dtype=object)
X_trans = np.array([['c', 'xy'], ['bla', 'y'], ['a', 'x']], dtype=object)
enc.fit(X_fit)
X_trans_enc = enc.transform(X_trans)
exp = np.array([[2, -2], [-2, 1], [0, 0]], dtype='int64')
assert_array_equal(X_trans_enc, exp)
X_trans_inv = enc.inverse_transform(X_trans_enc)
inv_exp = np.array([['c', None], [None, 'y'], ['a', 'x']], dtype=object)
assert_array_equal(X_trans_inv, inv_exp)
@pytest.mark.parametrize('dtype', [float, int])
def test_ordinal_encoder_handle_unknowns_numeric(dtype):
enc = OrdinalEncoder(handle_unknown='use_encoded_value',
unknown_value=-999)
X_fit = np.array([[1, 7], [2, 8], [3, 9]], dtype=dtype)
X_trans = np.array([[3, 12], [23, 8], [1, 7]], dtype=dtype)
enc.fit(X_fit)
X_trans_enc = enc.transform(X_trans)
exp = np.array([[2, -999], [-999, 1], [0, 0]], dtype='int64')
assert_array_equal(X_trans_enc, exp)
X_trans_inv = enc.inverse_transform(X_trans_enc)
inv_exp = np.array([[3, None], [None, 8], [1, 7]], dtype=object)
assert_array_equal(X_trans_inv, inv_exp)
def test_ordinal_encoder_handle_unknowns_raise():
X = np.array([['a', 'x'], ['b', 'y']], dtype=object)
enc = OrdinalEncoder(handle_unknown='use_encoded_value')
msg = ("unknown_value should be an integer or np.nan when handle_unknown "
"is 'use_encoded_value', got None.")
with pytest.raises(TypeError, match=msg):
enc.fit(X)
enc = OrdinalEncoder(unknown_value=-2)
msg = ("unknown_value should only be set when handle_unknown is "
"'use_encoded_value', got -2.")
with pytest.raises(TypeError, match=msg):
enc.fit(X)
enc = OrdinalEncoder(handle_unknown='use_encoded_value',
unknown_value='bla')
msg = ("unknown_value should be an integer or np.nan when handle_unknown "
"is 'use_encoded_value', got bla.")
with pytest.raises(TypeError, match=msg):
enc.fit(X)
enc = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=1)
msg = ("The used value for unknown_value (1) is one of the values already "
"used for encoding the seen categories.")
with pytest.raises(ValueError, match=msg):
enc.fit(X)
def test_ordinal_encoder_handle_unknowns_nan():
# Make sure unknown_value=np.nan properly works
enc = OrdinalEncoder(handle_unknown='use_encoded_value',
unknown_value=np.nan)
X_fit = np.array([[1], [2], [3]])
enc.fit(X_fit)
X_trans = enc.transform([[1], [2], [4]])
assert_array_equal(X_trans, [[0], [1], [np.nan]])
def test_ordinal_encoder_handle_unknowns_nan_non_float_dtype():
# Make sure an error is raised when unknown_value=np.nan and the dtype
# isn't a float dtype
enc = OrdinalEncoder(handle_unknown='use_encoded_value',
unknown_value=np.nan, dtype=int)
X_fit = np.array([[1], [2], [3]])
with pytest.raises(ValueError,
match="dtype parameter should be a float dtype"):
enc.fit(X_fit)
def test_ordinal_encoder_raise_categories_shape():
X = np.array([['Low', 'Medium', 'High', 'Medium', 'Low']], dtype=object).T
cats = ['Low', 'Medium', 'High']
enc = OrdinalEncoder(categories=cats)
msg = ("Shape mismatch: if categories is an array,")
with pytest.raises(ValueError, match=msg):
enc.fit(X)
def test_encoder_dtypes():
# check that dtypes are preserved when determining categories
enc = OneHotEncoder(categories='auto')
exp = np.array([[1., 0., 1., 0.], [0., 1., 0., 1.]], dtype='float64')
for X in [np.array([[1, 2], [3, 4]], dtype='int64'),
np.array([[1, 2], [3, 4]], dtype='float64'),
np.array([['a', 'b'], ['c', 'd']]), # string dtype
np.array([[1, 'a'], [3, 'b']], dtype='object')]:
enc.fit(X)
assert all([enc.categories_[i].dtype == X.dtype for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = [[1, 2], [3, 4]]
enc.fit(X)
assert all([np.issubdtype(enc.categories_[i].dtype, np.integer)
for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = [[1, 'a'], [3, 'b']]
enc.fit(X)
assert all([enc.categories_[i].dtype == 'object' for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
def test_encoder_dtypes_pandas():
# check dtype (similar to test_categorical_encoder_dtypes for dataframes)
pd = pytest.importorskip('pandas')
enc = OneHotEncoder(categories='auto')
exp = np.array([[1., 0., 1., 0., 1., 0.],
[0., 1., 0., 1., 0., 1.]], dtype='float64')
X = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]}, dtype='int64')
enc.fit(X)
assert all([enc.categories_[i].dtype == 'int64' for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = pd.DataFrame({'A': [1, 2], 'B': ['a', 'b'], 'C': [3., 4.]})
X_type = [X['A'].dtype, X['B'].dtype, X['C'].dtype]
enc.fit(X)
assert all([enc.categories_[i].dtype == X_type[i] for i in range(3)])
assert_array_equal(enc.transform(X).toarray(), exp)
def test_one_hot_encoder_warning():
enc = OneHotEncoder()
X = [['Male', 1], ['Female', 3]]
np.testing.assert_no_warnings(enc.fit_transform, X)
@pytest.mark.parametrize("missing_value", [np.nan, None, float('nan')])
def test_one_hot_encoder_drop_manual(missing_value):
cats_to_drop = ['def', 12, 3, 56, missing_value]
enc = OneHotEncoder(drop=cats_to_drop)
X = [['abc', 12, 2, 55, 'a'],
['def', 12, 1, 55, 'a'],
['def', 12, 3, 56, missing_value]]
trans = enc.fit_transform(X).toarray()
exp = [[1, 0, 1, 1, 1],
[0, 1, 0, 1, 1],
[0, 0, 0, 0, 0]]
assert_array_equal(trans, exp)
dropped_cats = [cat[feature]
for cat, feature in zip(enc.categories_,
enc.drop_idx_)]
X_inv_trans = enc.inverse_transform(trans)
X_array = np.array(X, dtype=object)
# last value is np.nan
if is_scalar_nan(cats_to_drop[-1]):
assert_array_equal(dropped_cats[:-1], cats_to_drop[:-1])
assert is_scalar_nan(dropped_cats[-1])
assert is_scalar_nan(cats_to_drop[-1])
# do not include the last column which includes missing values
assert_array_equal(X_array[:, :-1], X_inv_trans[:, :-1])
# check last column is the missing value
assert_array_equal(X_array[-1, :-1], X_inv_trans[-1, :-1])
assert is_scalar_nan(X_array[-1, -1])
assert is_scalar_nan(X_inv_trans[-1, -1])
else:
assert_array_equal(dropped_cats, cats_to_drop)
assert_array_equal(X_array, X_inv_trans)
@pytest.mark.parametrize(
"X_fit, params, err_msg",
[([["Male"], ["Female"]], {'drop': 'second'},
"Wrong input for parameter `drop`"),
([["Male"], ["Female"]], {'drop': 'first', 'handle_unknown': 'ignore'},
"`handle_unknown` must be 'error'"),
([['abc', 2, 55], ['def', 1, 55], ['def', 3, 59]],
{'drop': np.asarray('b', dtype=object)},
"Wrong input for parameter `drop`"),
([['abc', 2, 55], ['def', 1, 55], ['def', 3, 59]],
{'drop': ['ghi', 3, 59]},
"The following categories were supposed")]
)
def test_one_hot_encoder_invalid_params(X_fit, params, err_msg):
enc = OneHotEncoder(**params)
with pytest.raises(ValueError, match=err_msg):
enc.fit(X_fit)
@pytest.mark.parametrize('drop', [['abc', 3], ['abc', 3, 41, 'a']])
def test_invalid_drop_length(drop):
enc = OneHotEncoder(drop=drop)
err_msg = "`drop` should have length equal to the number"
with pytest.raises(ValueError, match=err_msg):
enc.fit([['abc', 2, 55], ['def', 1, 55], ['def', 3, 59]])
@pytest.mark.parametrize("density", [True, False],
ids=['sparse', 'dense'])
@pytest.mark.parametrize("drop", ['first',
['a', 2, 'b']],
ids=['first', 'manual'])
def test_categories(density, drop):
ohe_base = OneHotEncoder(sparse=density)
ohe_test = OneHotEncoder(sparse=density, drop=drop)
X = [['c', 1, 'a'],
['a', 2, 'b']]
ohe_base.fit(X)
ohe_test.fit(X)
assert_array_equal(ohe_base.categories_, ohe_test.categories_)
if drop == 'first':
assert_array_equal(ohe_test.drop_idx_, 0)
else:
for drop_cat, drop_idx, cat_list in zip(drop,
ohe_test.drop_idx_,
ohe_test.categories_):
assert cat_list[int(drop_idx)] == drop_cat
assert isinstance(ohe_test.drop_idx_, np.ndarray)
assert ohe_test.drop_idx_.dtype == object
@pytest.mark.parametrize('Encoder', [OneHotEncoder, OrdinalEncoder])
def test_encoders_has_categorical_tags(Encoder):
assert 'categorical' in Encoder()._get_tags()['X_types']
@pytest.mark.parametrize('input_dtype', ['O', 'U'])
@pytest.mark.parametrize('category_dtype', ['O', 'U'])
@pytest.mark.parametrize('array_type', ['list', 'array', 'dataframe'])
def test_encoders_unicode_categories(input_dtype, category_dtype, array_type):
"""Check that encoding work with string and object dtypes.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/15616
https://github.com/scikit-learn/scikit-learn/issues/15726
"""
X = np.array([['b'], ['a']], dtype=input_dtype)
categories = [np.array(['b', 'a'], dtype=category_dtype)]
ohe = OneHotEncoder(categories=categories, sparse=False).fit(X)
X_test = _convert_container([['a'], ['a'], ['b'], ['a']], array_type)
X_trans = ohe.transform(X_test)
expected = np.array([[0, 1], [0, 1], [1, 0], [0, 1]])
assert_allclose(X_trans, expected)
oe = OrdinalEncoder(categories=categories).fit(X)
X_trans = oe.transform(X_test)
expected = np.array([[1], [1], [0], [1]])
assert_array_equal(X_trans, expected)
@pytest.mark.parametrize("missing_value", [np.nan, None])
def test_ohe_missing_values_get_feature_names(missing_value):
# encoder with missing values with object dtypes
X = np.array([['a', 'b', missing_value, 'a', missing_value]],
dtype=object).T
ohe = OneHotEncoder(sparse=False, handle_unknown='ignore').fit(X)
names = ohe.get_feature_names()
assert_array_equal(names, ['x0_a', 'x0_b', f'x0_{missing_value}'])
def test_ohe_missing_value_support_pandas():
# check support for pandas with mixed dtypes and missing values
pd = pytest.importorskip('pandas')
df = pd.DataFrame({
'col1': ['dog', 'cat', None, 'cat'],
'col2': np.array([3, 0, 4, np.nan], dtype=float)
}, columns=['col1', 'col2'])
expected_df_trans = np.array([
[0, 1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 1],
])
Xtr = check_categorical_onehot(df)
assert_allclose(Xtr, expected_df_trans)
@pytest.mark.parametrize('pd_nan_type', ['pd.NA', 'np.nan'])
def test_ohe_missing_value_support_pandas_categorical(pd_nan_type):
# checks pandas dataframe with categorical features
if pd_nan_type == 'pd.NA':
# pd.NA is in pandas 1.0
pd = pytest.importorskip('pandas', minversion="1.0")
pd_missing_value = pd.NA
else: # np.nan
pd = pytest.importorskip('pandas')
pd_missing_value = np.nan
df = pd.DataFrame({
'col1': pd.Series(['c', 'a', pd_missing_value, 'b', 'a'],
dtype='category'),
})
expected_df_trans = np.array([
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0],
])
ohe = OneHotEncoder(sparse=False, handle_unknown='ignore')
df_trans = ohe.fit_transform(df)
assert_allclose(expected_df_trans, df_trans)
assert len(ohe.categories_) == 1
assert_array_equal(ohe.categories_[0][:-1], ['a', 'b', 'c'])
assert np.isnan(ohe.categories_[0][-1])
| bsd-3-clause |
massmutual/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; a Fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
minghuam/caffe | python/detect.py | 25 | 5026 | #!/usr/bin/env python
"""
detect.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../examples/imagenet/imagenet_deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../examples/imagenet/caffe_reference_imagenet_model"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="center_only",
choices=CROP_MODES,
help="Image crop mode"
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
default=255,
help="Multiply input features by this scale before input to net"
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
args = parser.parse_args()
channel_swap = [int(s) for s in args.channel_swap.split(',')]
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model,
gpu=args.gpu, mean_file=args.mean_file,
input_scale=args.input_scale, channel_swap=channel_swap)
if args.gpu:
        print('GPU mode')
# Load input.
t = time.time()
print('Loading input...')
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = (
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
)
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
# Enumerate the class probabilities.
        # derive the number of classes from the feature vectors themselves
        feats = np.vstack(df['feat'])
        class_cols = ['class{}'.format(x) for x in range(feats.shape[1])]
        df[class_cols] = pd.DataFrame(
            data=feats, index=df.index, columns=class_cols)
        df.to_csv(args.output_file, columns=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
| bsd-2-clause |
leesavide/pythonista-docs | Documentation/matplotlib/examples/old_animation/animation_blit_wx.py | 3 | 1990 | # For detailed comments on animation and the techniques used here, see
# the wiki entry
# http://www.scipy.org/wikis/topical_software/MatplotlibAnimation
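# The blitting pattern used below is: grab a clean copy of the axes
# background once (canvas.copy_from_bbox), then for every frame restore that
# background (canvas.restore_region), redraw only the animated line artist
# (ax.draw_artist) and push just the axes bounding box to the screen
# (canvas.blit).  Redrawing only the changed region is what makes the
# animation fast.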
from __future__ import print_function
# The number of blits() to make before exiting
NBLITS = 1000
import matplotlib
matplotlib.use('WXAgg')
matplotlib.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
import wx
import sys
import pylab as p
import numpy as npy
import time
# allow the user to disable the WXAgg accelerator from the command line
if '--no-accel' in sys.argv:
import matplotlib.backends.backend_wxagg
matplotlib.backends.backend_wxagg._use_accelerator(False)
fig, ax = plt.subplots()
canvas = fig.canvas
p.subplots_adjust(left=0.3, bottom=0.3) # check for flipy bugs
p.grid() # to ensure proper background restore
# create the initial line
x = npy.arange(0,2*npy.pi,0.01)
line, = p.plot(x, npy.sin(x), animated=True, lw=2)
# for profiling
tstart = time.time()
blit_time = 0.0
def update_line(*args):
global blit_time
if update_line.background is None:
update_line.background = canvas.copy_from_bbox(ax.bbox)
# restore the clean slate background
canvas.restore_region(update_line.background)
# update the data
line.set_ydata(npy.sin(x+update_line.cnt/10.0))
# just draw the animated artist
ax.draw_artist(line)
# just redraw the axes rectangle
t = time.time()
canvas.blit(ax.bbox)
blit_time += time.time() - t
if update_line.cnt == NBLITS:
# print the timing info and quit
frame_time = time.time() - tstart
print('%d frames: %.2f seconds' % (NBLITS, frame_time))
print('%d blits: %.2f seconds' % (NBLITS, blit_time))
print()
print('FPS: %.2f' % (NBLITS/frame_time))
print('BPS: %.2f' % (NBLITS/blit_time))
sys.exit()
update_line.cnt += 1
wx.WakeUpIdle()
update_line.cnt = 0
update_line.background = None
wx.EVT_IDLE(wx.GetApp(), update_line)
p.show()
| apache-2.0 |
btabibian/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 11 | 7453 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely, if
alpha is selected too large, the Lasso is equivalent to stepwise regression,
and thus brings no advantage over a univariate F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the curve (AUC) of the
precision-recall curve.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
linalg.pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
    # We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and makes it
    # easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
    # Silence the user warnings: they are not necessary for the example,
    # which is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
peterk87/sistr_cmd | sistr/src/blast_wrapper/__init__.py | 1 | 11454 | from datetime import datetime
import logging
import shutil
from subprocess import Popen, PIPE
import os
import pandas as pd
import numpy as np
from pandas.errors import EmptyDataError
import re
BLAST_TABLE_COLS = '''
qseqid
stitle
pident
length
mismatch
gapopen
qstart
qend
sstart
send
evalue
bitscore
qlen
slen
sseq
'''.strip().split('\n')
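# These column names are passed verbatim to blastn as a custom tabular output
# format ("-outfmt '6 <columns>'"), so every hit row parsed by BlastReader has
# exactly these fields in this order.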
class BlastRunner:
blast_db_created = False
def __init__(self, fasta_path, tmp_work_dir):
self.tmp_work_dir = tmp_work_dir
self.fasta_path = fasta_path
def _create_tmp_folder(self):
count = 1
tmp_dir = self.tmp_work_dir
while True:
try:
logging.info('Trying to create analysis directory at: %s', tmp_dir)
os.makedirs(tmp_dir)
break
except OSError as e:
logging.warning('Error on creation of tmp analysis directory "{}"! {}'.format(
tmp_dir,
e
))
tmp_dir = '{}_{}'.format(self.tmp_work_dir, count)
count += 1
self.tmp_work_dir = tmp_dir
return self.tmp_work_dir
def _copy_fasta_to_work_dir(self):
filename = os.path.basename(self.fasta_path)
filename_no_spaces = re.sub(r'\W', '_', filename)
dest_path = os.path.join(self.tmp_work_dir, filename_no_spaces)
if self.fasta_path == dest_path:
self.tmp_fasta_path = dest_path
return dest_path
shutil.copyfile(self.fasta_path, dest_path)
self.tmp_fasta_path = dest_path
return dest_path
def _run_makeblastdb(self):
work_dir = os.path.dirname(self.tmp_fasta_path)
filename = os.path.basename(self.tmp_fasta_path)
nin_filepath = os.path.join(work_dir, filename + '.nin')
if os.path.exists(nin_filepath):
self.blast_db_created = True
return self.tmp_fasta_path
p = Popen(['makeblastdb',
'-in', '{}'.format(self.tmp_fasta_path),
'-dbtype', 'nucl'],
stdout=PIPE,
stderr=PIPE)
p.wait()
stdout = p.stdout.read()
stderr = p.stderr.read()
if stdout is not None and stdout != '':
logging.debug('makeblastdb on {0} STDOUT: {1}'.format(self.tmp_fasta_path, stdout))
if stderr is not None and stderr != '':
logging.debug('makeblastdb on {0} STDERR: {1}'.format(self.tmp_fasta_path, stderr))
if os.path.exists(nin_filepath):
self.blast_db_created = True
return self.tmp_fasta_path
else:
ex_msg = 'makeblastdb was not able to create a BLAST DB for {0}. STDERR: {1}'.format(filename, stderr)
logging.error(ex_msg)
raise Exception(ex_msg)
def blast_against_query(self, query_fasta_path, blast_task='megablast', evalue=1e-20, min_pid=85):
if not self.blast_db_created:
self.prep_blast()
gene_filename = os.path.basename(query_fasta_path)
genome_filename = os.path.basename(self.tmp_fasta_path)
timestamp = '{:%Y%b%d_%H_%M_%S}'.format(datetime.now())
outfile = os.path.join(self.tmp_work_dir, '{}-{}-{}.blast'.format(gene_filename,
genome_filename,
timestamp))
p = Popen(['blastn',
'-task', blast_task,
'-query', query_fasta_path,
'-db', '{}'.format(self.tmp_fasta_path),
'-evalue', '{}'.format(evalue),
'-dust', 'no',
'-perc_identity', '{}'.format(min_pid),
'-out', outfile,
'-outfmt', '6 {}'.format(' '.join(BLAST_TABLE_COLS))],
stdout=PIPE,
stderr=PIPE)
p.wait()
stdout = p.stdout.read()
stderr = p.stderr.read()
if stdout is not None and stdout != '':
logging.debug('blastn on db {} and query {} STDOUT: {}'.format(genome_filename, gene_filename, stdout))
if stderr is not None and stderr != '':
logging.debug('blastn on db {} and query {} STDERR: {}'.format(genome_filename, gene_filename, stderr))
if os.path.exists(outfile):
return outfile
else:
ex_msg = 'blastn on db {} and query {} did not produce expected output file at {}'.format(genome_filename,
gene_filename,
outfile)
logging.error(ex_msg)
raise Exception(ex_msg)
def cleanup(self):
self.blast_db_created = False
shutil.rmtree(self.tmp_work_dir)
def prep_blast(self):
self._create_tmp_folder()
self._copy_fasta_to_work_dir()
self._run_makeblastdb()
def run_blast(self, query_fasta_path):
self.prep_blast()
blast_outfile = self.blast_against_query(query_fasta_path)
return blast_outfile
class BlastReader:
is_missing = True
is_perfect_match = False
is_trunc = False
df = None
def __init__(self, blast_outfile,filter=[]):
"""Read BLASTN output file into a pandas DataFrame
Sort the DataFrame by BLAST bitscore.
If there are no BLASTN results, then no results can be returned.
Args:
blast_outfile (str): `blastn` output file path
Raises:
EmptyDataError: No data could be parsed from the `blastn` output file
"""
self.blast_outfile = blast_outfile
try:
self.df = pd.read_csv(self.blast_outfile, header=None, sep='\t')
self.df.columns = BLAST_TABLE_COLS
# calculate the coverage for when results need to be validated
self.df.loc[:, 'coverage'] = self.df.length / self.df.qlen
self.df.sort_values(by='bitscore', ascending=False, inplace=True)
self.df.loc[:, 'is_trunc'] = BlastReader.trunc(qstart=self.df.qstart,
qend=self.df.qend,
qlen=self.df.qlen,
sstart=self.df.sstart,
send=self.df.send,
slen=self.df.slen)
logging.debug(self.df.head())
self.is_missing = False
self.filter_rows(filter)
except EmptyDataError as exc:
logging.warning('No BLASTN results to parse from file %s', blast_outfile)
self.is_missing = True
def filter_rows(self,filter):
for f in filter:
self.df = self.df[~self.df['qseqid'].str.contains(f)]
def df_dict(self):
if not self.is_missing:
return self.df.to_dict()
@staticmethod
def df_first_row_to_dict(df):
"""First DataFrame row to list of dict
Args:
df (pandas.DataFrame): A DataFrame with at least one row
Returns:
A list of dict that looks like:
[{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]
from a DataFrame that looks like:
C1 C2 C3
1 x y z
Else if `df` is `None`, returns `None`
"""
if df is not None and not df.empty:
return [dict(r) for i, r in df.head(1).iterrows()][0]
@staticmethod
def is_blast_result_trunc(qstart, qend, sstart, send, qlen, slen):
"""Check if a query sequence is truncated by the end of a subject sequence
Args:
qstart (int): Query sequence start index
qend (int): Query sequence end index
sstart (int): Subject sequence start index
send (int): Subject sequence end index
qlen (int): Query sequence length
slen (int): Subject sequence length
Returns:
bool: Result truncated by subject sequence end?
"""
q_match_len = abs(qstart - qend) + 1
s_max = max(sstart, send)
s_min = min(sstart, send)
return (q_match_len < qlen) and (s_max >= slen or s_min <= 1)
@staticmethod
def trunc(qstart, qend, sstart, send, qlen, slen):
"""Check if a query sequence is truncated by the end of a subject sequence
Args:
qstart (int pandas.Series): Query sequence start index
qend (int pandas.Series): Query sequence end index
sstart (int pandas.Series): Subject sequence start index
send (int pandas.Series): Subject sequence end index
qlen (int pandas.Series): Query sequence length
slen (int pandas.Series): Subject sequence length
Returns:
Boolean pandas.Series: Result truncated by subject sequence end?
"""
ssum2 = (send + sstart) / 2.0
sabs2 = np.abs(send - sstart) / 2.0
smax = ssum2 + sabs2
smin = ssum2 - sabs2
q_match_len = np.abs(qstart - qend) + 1
return (q_match_len < qlen) & ((smax >= slen) | (smin <= 1))
def perfect_matches(self):
"""
Return pandas DataFrame with perfect BLAST matches (100% identity and coverage)
Returns:
pandas.DataFrame or None: DataFrame of perfect BLAST matches or None if no perfect matches exist
"""
if self.is_missing:
return None
df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
if df_perfect_matches.shape[0] == 0:
return None
return df_perfect_matches
def top_result(self):
"""Return top `blastn` result
Try to find a 100% identity and coverage result (perfect match).
If one does not exist, then retrieve the result with the highest bitscore.
Returns:
Ordered dict of BLASTN results or None if no BLASTN results generated
"""
if self.is_missing:
return None
df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
if df_perfect_matches.shape[0]:
self.is_perfect_match = True
return BlastReader.df_first_row_to_dict(df_perfect_matches)
# Return the result with the highest bitscore.
# This is the first result in dataframe since the df is ordered by
# bitscore in descending order.
result_dict = BlastReader.df_first_row_to_dict(self.df)
if result_dict is None:
return None
result_trunc = BlastReader.is_blast_result_trunc(qstart=result_dict['qstart'],
qend=result_dict['qend'],
sstart=result_dict['sstart'],
send=result_dict['send'],
qlen=result_dict['qlen'],
slen=result_dict['slen'])
self.is_trunc = result_trunc
return result_dict
| apache-2.0 |
AnasGhrab/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
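# With these settings the default MCD support size is
# (n_samples + n_features + 1) // 2 = 43 observations, and the estimator can
# tolerate at most (n_samples - n_features - 1) / 2 = 37 outliers; the
# contamination grid below deliberately goes beyond that point (up to
# n_samples / 2 = 40 outliers) so the breakdown of the robust estimate is
# visible in the plots.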
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
shahankhatch/scikit-learn | sklearn/covariance/tests/test_covariance.py | 69 | 11116 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
    assert np.amin(mahal_dist) > 0
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
    assert cov.precision_ is None
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
    assert lw.precision_ is None
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
    assert lw.precision_ is None
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
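# Illustrative sketch (not part of the original tests): the LedoitWolf /
# ShrunkCovariance comparisons above rest on the fact that a shrunk covariance
# is a convex combination of the empirical covariance and a scaled identity.
# The helper below, whose name is hypothetical and which is never called by
# the tests, recomputes that combination by hand for centered data, assuming
# numpy is available as ``np`` as elsewhere in this module.
def _manual_shrunk_covariance_sketch(X_centered, shrinkage):
    n_samples, n_features = X_centered.shape
    emp_cov = np.dot(X_centered.T, X_centered) / n_samples
    # mu is the average variance, i.e. trace(emp_cov) / n_features
    mu = np.trace(emp_cov) / n_features
    # convex combination: (1 - shrinkage) * S + shrinkage * mu * I
    return (1. - shrinkage) * emp_cov + shrinkage * mu * np.eye(n_features)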
| bsd-3-clause |
zfrenchee/pandas | doc/make.py | 8 | 12640 | #!/usr/bin/env python
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
<del>Note: currently latex builds do not work because of table formats that are not
supported in the latex generation.</del>
2014-01-30: Latex has some issues but 'latex_forced' works ok for 0.13.0-400 or so
Usage
-----
python make.py clean
python make.py html
"""
from __future__ import print_function
import io
import glob # noqa
import os
import shutil
import sys
from contextlib import contextmanager
import sphinx # noqa
import argparse
import jinja2 # noqa
os.environ['PYTHONPATH'] = '..'
SPHINX_BUILD = 'sphinxbuild'
def _process_user(user):
if user is None or user is False:
user = ''
else:
user = user + '@'
return user
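# Illustrative note: _process_user('alice') returns 'alice@' while
# _process_user(None) or _process_user(False) returns '', so the upload
# helpers below can build rsync/scp targets of the form
# '{user}pandas.pydata.org:...' whether or not a username was supplied.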
def upload_dev(user=None):
'push a copy to the pydata dev directory'
user = _process_user(user)
if os.system('cd build/html; rsync -avz . {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/dev/ -essh'.format(user)):
raise SystemExit('Upload to Pydata Dev failed')
def upload_dev_pdf(user=None):
'push a copy to the pydata dev directory'
user = _process_user(user)
if os.system('cd build/latex; scp pandas.pdf {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/dev/'.format(user)):
raise SystemExit('PDF upload to Pydata Dev failed')
def upload_stable(user=None):
'push a copy to the pydata stable directory'
user = _process_user(user)
if os.system('cd build/html; rsync -avz . {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/stable/ -essh'.format(user)):
raise SystemExit('Upload to stable failed')
def upload_stable_pdf(user=None):
    'push a copy to the pydata stable directory'
user = _process_user(user)
if os.system('cd build/latex; scp pandas.pdf {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/stable/'.format(user)):
raise SystemExit('PDF upload to stable failed')
def upload_prev(ver, doc_root='./', user=None):
'push a copy of older release to appropriate version directory'
user = _process_user(user)
local_dir = doc_root + 'build/html'
remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
cmd = 'cd %s; rsync -avz . %spandas.pydata.org:%s -essh'
cmd = cmd % (local_dir, user, remote_dir)
print(cmd)
if os.system(cmd):
raise SystemExit(
'Upload to %s from %s failed' % (remote_dir, local_dir))
local_dir = doc_root + 'build/latex'
pdf_cmd = 'cd %s; scp pandas.pdf %spandas.pydata.org:%s'
pdf_cmd = pdf_cmd % (local_dir, user, remote_dir)
if os.system(pdf_cmd):
raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root))
def build_pandas():
os.chdir('..')
os.system('python setup.py clean')
os.system('python setup.py build_ext --inplace')
os.chdir('doc')
def build_prev(ver):
    if os.system('git checkout v%s' % ver) == 0:
os.chdir('..')
os.system('python setup.py clean')
os.system('python setup.py build_ext --inplace')
os.chdir('doc')
os.system('python make.py clean')
os.system('python make.py html')
os.system('python make.py latex')
os.system('git checkout master')
def clean():
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('source/generated'):
shutil.rmtree('source/generated')
@contextmanager
def maybe_exclude_notebooks():
"""
Skip building the notebooks if pandoc is not installed.
This assumes that nbsphinx is installed.
"""
base = os.path.dirname(__file__)
notebooks = [os.path.join(base, 'source', nb)
for nb in ['style.ipynb']]
contents = {}
def _remove_notebooks():
for nb in notebooks:
with open(nb, 'rt') as f:
contents[nb] = f.read()
os.remove(nb)
# Skip notebook conversion if
# 1. nbconvert isn't installed, or
# 2. nbconvert is installed, but pandoc isn't
try:
import nbconvert
except ImportError:
print("Warning: nbconvert not installed. Skipping notebooks.")
_remove_notebooks()
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
print("Warning: Pandoc is not installed. Skipping notebooks.")
_remove_notebooks()
yield
for nb, content in contents.items():
with open(nb, 'wt') as f:
f.write(content)
def html():
check_build()
with maybe_exclude_notebooks():
if os.system('sphinx-build -P -b html -d build/doctrees '
'source build/html'):
raise SystemExit("Building HTML failed.")
try:
# remove stale file
os.remove('build/html/pandas.zip')
except:
pass
def zip_html():
try:
print("\nZipping up HTML docs...")
# just in case the wonky build box doesn't have zip
# don't fail this.
os.system('cd build; rm -f html/pandas.zip; zip html/pandas.zip -r -q html/* ')
print("\n")
except:
pass
def latex():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Call the makefile produced by sphinx...
if os.system('make'):
print("Rendering LaTeX failed.")
print("You may still be able to get a usable PDF file by going into 'build/latex'")
print("and executing 'pdflatex pandas.tex' for the requisite number of passes.")
print("Or using the 'latex_forced' target")
raise SystemExit
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def latex_forced():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Manually call pdflatex, 3 passes should ensure latex fixes up
# all the required cross-references and such.
os.system('pdflatex -interaction=nonstopmode pandas.tex')
os.system('pdflatex -interaction=nonstopmode pandas.tex')
os.system('pdflatex -interaction=nonstopmode pandas.tex')
raise SystemExit("You should check the file 'build/latex/pandas.pdf' for problems.")
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def check_build():
build_dirs = [
'build', 'build/doctrees', 'build/html',
'build/latex', 'build/plots', 'build/_static',
'build/_templates']
for d in build_dirs:
try:
os.mkdir(d)
except OSError:
pass
def all():
# clean()
html()
def auto_dev_build(debug=False):
msg = ''
try:
step = 'clean'
clean()
step = 'html'
html()
step = 'upload dev'
upload_dev()
if not debug:
sendmail(step)
step = 'latex'
latex()
step = 'upload pdf'
upload_dev_pdf()
if not debug:
sendmail(step)
except (Exception, SystemExit) as inst:
msg = str(inst) + '\n'
sendmail(step, '[ERROR] ' + msg)
def sendmail(step=None, err_msg=None):
from_name, to_name = _get_config()
if step is None:
step = ''
if err_msg is None or '[ERROR]' not in err_msg:
msgstr = 'Daily docs %s completed successfully' % step
subject = "DOC: %s successful" % step
else:
msgstr = err_msg
subject = "DOC: %s failed" % step
import smtplib
from email.MIMEText import MIMEText
msg = MIMEText(msgstr)
msg['Subject'] = subject
msg['From'] = from_name
msg['To'] = to_name
server_str, port, login, pwd = _get_credentials()
server = smtplib.SMTP(server_str, port)
server.ehlo()
server.starttls()
server.ehlo()
server.login(login, pwd)
try:
server.sendmail(from_name, to_name, msg.as_string())
finally:
server.close()
def _get_dir(subdir=None):
import getpass
USERNAME = getpass.getuser()
if sys.platform == 'darwin':
HOME = '/Users/%s' % USERNAME
else:
HOME = '/home/%s' % USERNAME
if subdir is None:
subdir = '/code/scripts/config'
conf_dir = '%s/%s' % (HOME, subdir)
return conf_dir
def _get_credentials():
tmp_dir = _get_dir()
cred = '%s/credentials' % tmp_dir
with open(cred, 'r') as fh:
server, port, un, domain = fh.read().split(',')
port = int(port)
login = un + '@' + domain + '.com'
import base64
with open('%s/cron_email_pwd' % tmp_dir, 'r') as fh:
pwd = base64.b64decode(fh.read())
return server, port, login, pwd
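# Note (inferred from the parsing above, example values are hypothetical):
# the ``credentials`` file is expected to contain a single comma-separated
# line such as
#     smtp.example.com,587,docbuilder,example
# which yields the SMTP login '[email protected]', while
# ``cron_email_pwd`` holds the base64-encoded password.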
def _get_config():
tmp_dir = _get_dir()
with open('%s/addresses' % tmp_dir, 'r') as fh:
from_name, to_name = fh.read().split(',')
return from_name, to_name
funcd = {
'html': html,
'zip_html': zip_html,
'upload_dev': upload_dev,
'upload_stable': upload_stable,
'upload_dev_pdf': upload_dev_pdf,
'upload_stable_pdf': upload_stable_pdf,
'latex': latex,
'latex_forced': latex_forced,
'clean': clean,
'auto_dev': auto_dev_build,
'auto_debug': lambda: auto_dev_build(True),
'build_pandas': build_pandas,
'all': all,
}
small_docs = False
# current_dir = os.getcwd()
# os.chdir(os.path.dirname(os.path.join(current_dir, __file__)))
import argparse
argparser = argparse.ArgumentParser(description="""
pandas documentation builder
""".strip())
# argparser.add_argument('-arg_name', '--arg_name',
# metavar='label for arg help',
# type=str|etc,
# nargs='N|*|?|+|argparse.REMAINDER',
# required=False,
# #choices='abc',
# help='help string',
# action='store|store_true')
# args = argparser.parse_args()
#print args.accumulate(args.integers)
def generate_index(api=True, single=False, **kwds):
from jinja2 import Template
with open("source/index.rst.template") as f:
t = Template(f.read())
with open("source/index.rst","w") as f:
f.write(t.render(api=api,single=single,**kwds))
import argparse
argparser = argparse.ArgumentParser(description="pandas documentation builder",
epilog="Targets : %s" % funcd.keys())
argparser.add_argument('--no-api',
default=False,
                       help='Omit api and autosummary',
action='store_true')
argparser.add_argument('--single',
metavar='FILENAME',
type=str,
default=False,
help='filename of section to compile, e.g. "indexing"')
argparser.add_argument('--user',
type=str,
default=False,
help='Username to connect to the pydata server')
def main():
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
if args.single:
args.single = os.path.basename(args.single).split(".rst")[0]
if 'clean' in unknown:
args.single=False
generate_index(api=not args.no_api and not args.single, single=args.single)
if len(sys.argv) > 2:
ftype = sys.argv[1]
ver = sys.argv[2]
if ftype == 'build_previous':
        build_prev(ver)
if ftype == 'upload_previous':
upload_prev(ver, user=args.user)
elif len(sys.argv) == 2:
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s' % (
arg, list(funcd.keys())))
if args.user:
func(user=args.user)
else:
func()
else:
small_docs = False
all()
# os.chdir(current_dir)
if __name__ == '__main__':
import sys
sys.exit(main())
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/gaussian_process/gpc.py | 18 | 31958 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf, expit
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
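# The sketch below, an unused and hypothetically named helper, illustrates the
# least-squares fit outlined in the comment above; because the system is
# ill-conditioned it may not reproduce COEFS to the digits shown.
def _rederive_sigmoid_approx_coefs():
    from scipy.linalg import lstsq
    x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
    b = expit(x)  # logistic sigmoid values to approximate
    A = (erf(np.outer(x, LAMBDAS.ravel())) + 1) / 2  # error-function basis
    coefs, _, _, _ = lstsq(A, b)
    return coefs[:, np.newaxis]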
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
    n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
    max_iter_predict : int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_ : array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_ : array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = expit(f)
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
    function. For multi-class classification, several binary one-versus-rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
multi_class : string, default : "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
.. versionadded:: 0.18
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
            Target values (class labels; binary or multi-class)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
        theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
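# Minimal usage sketch (illustrative only, not part of the module API): the
# helper name is hypothetical and it is never called here; it assumes
# scikit-learn's bundled iris dataset is available and simply exercises the
# public methods of GaussianProcessClassifier defined above.
def _gpc_usage_sketch():
    from sklearn.datasets import load_iris
    iris = load_iris()
    # 1.0 * RBF(1.0) mirrors the default kernel used when none is passed
    gpc = GaussianProcessClassifier(kernel=C(1.0) * RBF(1.0), random_state=0)
    gpc.fit(iris.data, iris.target)  # three classes -> one-vs-rest internally
    labels = gpc.predict(iris.data[:3])
    proba = gpc.predict_proba(iris.data[:3])  # per-class probabilities
    return labels, proba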
| bsd-3-clause |
grehx/spark-tk | regression-tests/sparktkregtests/testcases/graph/graph_lbp_test.py | 1 | 8856 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests LBP Graphx implementation by comparing results agains graphlab """
import unittest
from sparktkregtests.lib import sparktk_test
class LbpPottsModel(sparktk_test.SparkTKTestCase):
def test_lbp_cross_3_state(self):
"""Test 3 state Potts model"""
vertex_frame = self.context.frame.create(
[[1, "1.0 0.0 0.0"],
[2, ".3 .3 .3"],
[3, "1.0 0.0 0.0"],
[4, "0.0 1.0 0.0"],
[5, "0.0 0.0 1.0"]],
[("id", int),
("vertex_weight", str)])
edge_frame = self.context.frame.create(
[[2, 3, 1.0],
[2, 1, 1.0],
[2, 4, 1.0],
[2, 5, 1.0]],
[("src", int),
("dst", int),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {1: (1.0, 0.0, 0.0),
2: (0.57611688, 0.2119415576, 0.2119415576),
3: (1.0, 0.0, 0.0),
4: (0.0, 1.0, 0.0),
5: (0.0, 0.0, 1.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
self.assertAlmostEqual(known_vals[row["id"]][2], values[2])
def test_lbp_cross_50(self):
"""Test a balanced graph"""
vertex_frame = self.context.frame.create(
[[1, "1.0 0.0"],
[2, ".5 .5"],
[3, "1.0 0.0"],
[4, "0.0 1.0"],
[5, "0.0 1.0"]],
[("id", int),
("vertex_weight", str)])
edge_frame = self.context.frame.create(
[[2, 3, 1.0],
[2, 1, 1.0],
[2, 4, 1.0],
[2, 5, 1.0]],
[("src", int),
("dst", int),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {1: (1.0, 0.0),
2: (0.5, 0.5),
3: (1.0, 0.0),
4: (0.0, 1.0),
5: (0.0, 1.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
def test_lbp_cross_3_1(self):
"""Test LBP on a cross with a 3-1 split on the distribution"""
vertex_frame = self.context.frame.create(
[[1, "1.0 0.0"],
[2, "0.5 0.5"],
[3, "1.0 0.0"],
[4, "0.0 1.0"],
[5, "1.0 0.0"]],
[("id", int),
("vertex_weight", str)])
edge_frame = self.context.frame.create(
[[2, 3, 1.0],
[2, 1, 1.0],
[2, 4, 1.0],
[2, 5, 1.0]],
[("src", int),
("dst", int),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {1: (1.0, 0.0),
2: (0.88079707798, 0.119202922),
3: (1.0, 0.0),
4: (0.0, 1.0),
5: (1.0, 0.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
def test_lbp_cross(self):
"""Test lbp on a basic cross with a 4-0 split"""
vertex_frame = self.context.frame.create(
[["1", "1.0 0.0"],
["2", ".5 .5"],
["3", "1.0 0.0"],
["4", "1.0 0.0"],
["5", "1.0 0.0"]],
[("id", str), ("vertex_weight", str)])
edge_frame = self.context.frame.create(
[["2", "3", 0.5],
["2", "1", 0.5],
["2", "4", 0.5],
["2", "5", 0.5]],
[("src", str),
("dst", str),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {"1": (1.0, 0.0),
"2": (0.88079707797, 0.11920292202),
"3": (1.0, 0.0),
"4": (1.0, 0.0),
"5": (1.0, 0.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
def test_lbp_double_cross(self):
"""Test lbp on a double cross"""
vertex_frame = self.context.frame.create(
[["1", "1.0 0.0", 1, "1.0 0.0"],
["2", "0.5 0.5", 0, ""],
["3", "1.0 0.0", 1, "1.0 0.0"],
["4", "0.0 1.0", 1, "0.0 1.0"],
["5", "0.5 0.5", 0, ""],
["6", "0.0 1.0", 1, "0.0 1.0"],
["7", "0.0 1.0", 1, "0.0 1.0"],
["8", "1.0 0.0", 1, "1.0 0.0"]],
[("id", str),
("vertex_weight", str),
("is_observed", int), ("label", str)])
edge_frame = self.context.frame.create(
[["2", "3", 1.0],
["2", "1", 1.0],
["2", "4", 1.0],
["2", "5", 1.0],
["6", "5", 1.0],
["7", "5", 1.0],
["8", "5", 1.0]],
[("src", str),
("dst", str),
("weight", float)])
graph = self.context.graph.create(vertex_frame, edge_frame)
potts = graph.loopy_belief_propagation("vertex_weight", "weight", 2)
known_vals = {"1": (1.0, 0.0),
"2": (0.6378903114, 0.36210968),
"3": (1.0, 0.0),
"4": (0.0, 1.0),
"5": (0.36210968, 0.6378903114),
"6": (0.0, 1.0),
"7": (0.0, 1.0),
"8": (1.0, 0.0)}
potts_vals = potts.to_pandas(potts.count())
for _, row in potts_vals.iterrows():
values = map(float, row["posterior"][1:-1].split(","))
self.assertAlmostEqual(known_vals[row["id"]][0], values[0])
self.assertAlmostEqual(known_vals[row["id"]][1], values[1])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
fzadow/CATMAID | django/applications/catmaid/control/useranalytics.py | 4 | 16549 | import numpy as np
from datetime import timedelta, datetime
from django.http import HttpResponse
from catmaid.models import Connector, Treenode, Review
from catmaid.control.user_evaluation import _parse_date
# Because we don't want to show generated images in a window, we can use
# the Agg backend. This avoids some potential threading issues.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, DayLocator
from pylab import figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Bout(object):
""" Represents one bout, based on a list of events. The first event ist the
start date/time, the last event the end.
"""
def __init__(self, start, end=None):
self.events = [start]
if end:
self.events.append(end)
def addEvent(self, e):
""" Increments the event counter.
"""
self.events.append(e)
@property
def nrEvents(self):
return len(self.events)
@property
def start(self):
return self.events[0]
@property
def end(self):
return self.events[-1]
def __str__(self):
return "Bout with %s events [%s, %s]" % \
(self.nrEvents, self.start, self.end)
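# Illustrative example: a Bout created as Bout(t0) and extended with
# addEvent(t1) reports nrEvents == 2, start == t0 and end == t1, and its
# string form reads "Bout with 2 events [t0, t1]".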
def plot_useranalytics(request):
""" Creates a PNG image containing different plots for analzing the
performance of individual users over time.
"""
userid = request.GET.get('userid', -1)
start_date = request.GET.get('start')
end_date = request.GET.get('end')
print userid, start_date, end_date
if request.user.is_superuser:
end = _parse_date(end_date) if end_date else datetime.now()
start = _parse_date(start_date) if start_date else end - timedelta(end.isoweekday() + 7)
f = generateReport( userid, 10, start, end )
else:
f = figure(1, figsize=(6,6))
canvas = FigureCanvasAgg( f )
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def eventTimes(user_id, start_date, end_date):
""" Returns a tuple containing a list of tree node edition times, connector
edition times and tree node review times within the date range specified
where the editor/reviewer is the given user.
"""
dr = (start_date, end_date)
tns = Treenode.objects.filter(
editor_id = user_id,
edition_time__range=dr).values_list('edition_time', flat=True)
cns = Connector.objects.filter(
editor_id = user_id,
edition_time__range=dr).values_list('edition_time', flat=True)
rns = Review.objects.filter(
reviewer_id = user_id,
review_time__range=dr).values_list('review_time', flat=True)
return list(tns), list(cns), list(rns)
def eventsPerInterval(times, start_date, end_date, interval='day'):
""" Creates a histogram of how many events fall into all intervals between
    <start_date> and <end_date>. The interval type can be day, hour and
    halfhour. Returned is a tuple containing two elements: the histogram and a
time axis, labeling every bin.
"""
if interval=='day':
intervalsPerDay = 1
secondsPerInterval = 86400
elif interval=='hour':
intervalsPerDay = 24
secondsPerInterval = 3600
elif interval=='halfhour':
intervalsPerDay = 48
secondsPerInterval = 1800
else:
raise ValueError('Interval options are day, hour, or halfhour')
# Generate axis
daycount = (end_date - start_date).days
dt = timedelta(0, secondsPerInterval)
timeaxis = [start_date + n*dt for n in xrange(intervalsPerDay * daycount)]
# Calculate bins
timebins = np.zeros(intervalsPerDay * daycount)
intervalsPerSecond = 1.0 / secondsPerInterval
for t in times:
i = int((t - start_date).total_seconds() * intervalsPerSecond)
timebins[i] += 1
return timebins, timeaxis
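# Worked example (illustrative, assuming <start_date> is at midnight): with
# interval='hour', an event at 10:30 on the first day falls into bin
# int(10.5 * 3600 / 3600) = 10, i.e. the 10:00-11:00 slot; the same clock time
# one day later lands in bin 24 + 10 = 34.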
def activeTimes( alltimes, gapThresh ):
""" Goes through the sorted array of time differences between all events
stored in <alltimes>. If two events are closer together than <gapThresh>
    minutes, they are counted as events within one bout. A Bout object is
    yielded for each such group of events.
"""
# Sort all events and create a list of (time) differences between them
alltimes.sort()
dts = np.diff(alltimes)
    # Threshold between two events to be counted as separate bouts (seconds)
threshold = 60 * gapThresh
# Indicates whether we are currently in a bout and since we haven't even
# looked at the first event, we are initially not.
bout = None
# Go through all events
for i, e in enumerate(alltimes):
if i > 0 and dts[i-1].total_seconds() < threshold:
# Increment current bout's event counter and continue with the
# next element as long as the time difference to the next
# element is below our threshold.
bout.addEvent(e)
continue
else:
# Return current bout (not available in first iteration) and create
# a new one.
if bout:
yield bout
bout = Bout(e)
# Return last bout, if it hasn't been returned, yet
if bout:
yield bout
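# Worked example (illustrative): with gapThresh=10 (minutes) and events at
# 12:00, 12:05 and 12:30, the 5-minute gap keeps the first two events in one
# bout while the 25-minute gap opens a new one, so two Bout objects are
# yielded: one spanning [12:00, 12:05] and one containing only the 12:30 event.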
def activeTimesPerDay(active_bouts):
""" Creates a tuple containing the active time in hours for every day
between the first event of the first bout and the last event of the last
bout as well as a list with the date for every day.
"""
# Return right away if there are no bouts
if not active_bouts:
return [], []
# Find first event of first bout
daystart = active_bouts[0].start.replace(
hour=0, minute=0, second=0, microsecond=0)
# Find last event of last bout
dayend = active_bouts[-1].end
    # Get total number of days between the first and the last event
numdays = (dayend - daystart).days + 1
# Create a list of dates for every day between first and last event
timeaxis = [daystart.date() + timedelta(d) for d in range(numdays)]
# Calculate the netto active time for each day
net_active_time = np.array(np.zeros(numdays))
for bout in active_bouts:
active_time = (bout.end - bout.start).total_seconds()
net_active_time[(bout.start - daystart).days] += active_time
# Return a tuple containing the active time for every
# day in hours and the list of days.
return np.divide(net_active_time, 3600), timeaxis
def singleDayEvents( alltimes, start_hour, end_hour ):
alltimes.sort()
timeaxis = [n for n in np.add(start_hour,range(end_hour-start_hour+1))]
activity = np.zeros(end_hour-start_hour+1)
for a in alltimes:
if a.hour >= start_hour:
if a.hour < end_hour:
activity[a.hour-start_hour] += 1
return np.true_divide(activity,(alltimes[-1] - alltimes[0]).days), timeaxis
def singleDayActiveness( activebouts, increment, start_hour, end_hour ):
""" Returns a ... for all bouts between <start_hour> and <end_hour> of the
day.
"""
# Return right away, when there are no bouts given
if not activebouts:
return [], []
    # Make sure 60 can be cleanly divided by <increment>
if np.mod(60, increment) > 0:
raise ValueError('Increments must divide 60 evenly')
# Some constants
stepsPerHour = 60 / increment
hoursConsidered = (end_hour - start_hour) + 1
daysConsidered = (activebouts[-1].end - activebouts[0].start).days + 1
# Get start of current day
starttime = datetime.now()
    # datetime.replace() returns a new object, so re-assign the result
    starttime = starttime.replace(hour=start_hour, minute=0, second=0,
                                  microsecond=0)
# Create time axis list with entry for every <increment> minutes between
# <start_hour> and <end_hour>.
timeaxis = [starttime + timedelta(0, 0, 0, 0, n * increment) \
for n in range(stepsPerHour * hoursConsidered)]
# Loop through all days considered to find number of weekend days
weekendCorrection = 0
for d in range(daysConsidered):
        # isoweekday(): Monday == 1, ..., Saturday == 6, Sunday == 7
        saturday = (activebouts[0].start + timedelta(d)).isoweekday() == 6
        sunday = (activebouts[0].start + timedelta(d)).isoweekday() == 7
if saturday or sunday:
weekendCorrection += 1
# Initialize list for minutes per period with zeros
durPerPeriod = np.zeros(stepsPerHour * hoursConsidered)
for bout in activebouts:
# Ignore bouts what start after requested <end_hour> or end before
# requested <start_hour>.
if bout.start.hour > end_hour:
continue
elif bout.end.hour < start_hour:
continue
# Crop start and end times of every valid bout to request period
elif bout.start.hour < start_hour:
# FIXME: replace doesn't replace in place, but returns a new object
bout.start.replace(hour=start_hour,minute=0,second=0,microsecond=0)
elif bout.end.hour > end_hour:
# FIXME: replace doesn't replace in place, but returns a new object
bout.end.replace(hour=end_hour,minute=0,second=0,microsecond=0)
        # Go through every sub bout, defined by periods of <increment> minutes,
# and store the number of minutes for every time-fraction considered.
for subbout in splitBout(bout,increment):
subboutSeconds = (subbout.end - subbout.start).total_seconds()
i = stepsPerHour * (subbout.start.hour - start_hour) + \
subbout.start.minute / increment
durPerPeriod[i] += np.true_divide(subboutSeconds, 60)
    # Normalize the minutes accumulated in each period by the period length
    # and the number of non-weekend days considered
n = increment * (daysConsidered - weekendCorrection)
durations = np.true_divide(durPerPeriod, n)
# Return a tuple containing a list durations and a list of timepoints
return durations, timeaxis
def splitBout(bout,increment):
""" Splits one bout in periods of <increment> minutes.
"""
if np.mod(60, increment) > 0:
raise RuntimeError('Increments must divide 60 evenly')
boutListOut = []
currtime = bout.start
nexttime = bout.start
while nexttime < bout.end:
basemin = increment * ( currtime.minute / increment )
nexttime = currtime.replace(minute=0,second=0,microsecond=0) + timedelta(0,0,0,0,basemin+increment)
if nexttime > bout.end:
nexttime = bout.end
boutListOut.append(Bout(currtime, nexttime))
currtime = nexttime
return boutListOut
def generateErrorImage(msg):
""" Creates an empty image (based on image nr. 1) and adds a message to it.
"""
fig = plt.figure(1, figsize=(6,6))
fig.clf()
fig.suptitle(msg)
return fig
def generateReport( user_id, activeTimeThresh, start_date, end_date ):
""" nts: node times
cts: connector times
rts: review times """
nts, cts, rts = eventTimes( user_id, start_date, end_date )
# If no nodes have been found, return an image with a descriptive text.
if len(nts) == 0:
return generateErrorImage("No tree nodes were edited during the " +
"defined period if time.")
annotationEvents, ae_timeaxis = eventsPerInterval( nts + cts, start_date, end_date )
reviewEvents, re_timeaxis = eventsPerInterval( rts, start_date, end_date )
activeBouts = list(activeTimes( nts+cts+rts, activeTimeThresh ))
netActiveTime, at_timeaxis = activeTimesPerDay( activeBouts )
dayformat = DateFormatter('%b %d')
fig = plt.figure(figsize=(12,10))
# Top left plot: created and edited nodes per day
ax1 = plt.subplot2grid((2,2), (0,0))
an = ax1.bar( ae_timeaxis, annotationEvents, color='#0000AA')
rv = ax1.bar( re_timeaxis, reviewEvents, bottom=annotationEvents, color='#AA0000')
ax1.set_xlim((start_date,end_date))
ax1.legend( (an, rv), ('Annotated', 'Reviewed'), loc=2,frameon=False )
ax1.set_ylabel('Nodes')
yl = ax1.get_yticklabels()
plt.setp(yl, fontsize=10)
ax1.xaxis.set_major_formatter(dayformat)
xl = ax1.get_xticklabels()
plt.setp(xl, rotation=30, fontsize=10)
ax1.set_title('Edit events', fontsize=10)
# Bottom left plot: net active time per day
ax2 = plt.subplot2grid((2,2), (1,0))
ax2.bar( at_timeaxis, netActiveTime, color='k')
ax2.set_xlim((start_date,end_date))
ax2.set_ylabel('Hours')
yl = ax2.get_yticklabels()
plt.setp(yl, fontsize=10)
ax2.xaxis.set_major_formatter(dayformat)
xl = ax2.get_xticklabels()
plt.setp(xl, rotation=30, fontsize=10)
ax2.set_title('Net daily active time', fontsize=10)
"""
ax3 = fig.add_subplot(223)
ax3 = eventsPerIntervalPerDayPlot(ax3, rts+nts+cts, start_date, end_date, 30 )
"""
# Right column plot: bouts over days
ax4 = plt.subplot2grid((2,2), (0,1), rowspan=2)
ax4 = dailyActivePlotFigure( activeBouts, ax4, start_date, end_date )
yl = ax4.get_yticklabels()
plt.setp(yl, fontsize=10)
ax4.xaxis.set_major_formatter(dayformat)
xl = ax4.get_xticklabels()
plt.setp(xl, rotation=30, fontsize=10)
ax4.set_title('Active Bouts', fontsize=10)
yl = ax4.get_yticklabels()
plt.setp(yl, fontsize=10)
ax4.set_ylabel('Time (24 hr)')
return fig
def dailyActivePlotFigure( activebouts, ax, start_date, end_date ):
""" Draws a plot of all bouts during each day between <start_date> and
<end_date> to the plot given by <ax>.
"""
# Y axis: Draw a line for each two hours in a day and set ticks accordingly
for i in range(12):
ax.axhline(2 * i, color='#AAAAAA', linestyle = ':')
ax.axhspan(8,18,facecolor='#999999',alpha=0.25)
ax.set_yticks([0,2,4,6,8,10,12,14,16,18,20,22,24])
# X axis: Ticks and labels for every day
ax.xaxis.set_major_locator(DayLocator())
# Draw all bouts
for bout in activebouts:
# Ignore bouts that span accross midnight
# TODO: Draw midnight spanning bouts, too.
if bout.start.day == bout.end.day:
isodate = bout.start.isocalendar()
ax.bar( bout.start.replace(hour=0,minute=0,second=0,microsecond=0),
np.true_divide((bout.end-bout.start).total_seconds(), 3600),
bottom=bout.start.hour + bout.start.minute/60.0 + bout.start.second/3600.0,
alpha=0.5, color='#0000AA')
# Set Axis limits
ax.set_ylim((0, 24))
ax.set_xlim((start_date, end_date))
return ax
def eventsPerIntervalPerDayPlot(ax,times,start_date,end_date,interval=60):
if np.mod(24 * 60, interval) > 0:
raise ValueError('Interval in minutes must divide the day evenly')
daycount = (end_date-start_date).days
timebins = {}
for i in range(daycount):
timebins[i] = np.zeros(24 * 60 / interval)
dayList = []
daylabels = []
for i in range(daycount):
day = start_date + timedelta( i )
dayList.append( day )
daylabels.append( str(day.month) + '/' + str(day.day) )
timeaxis = [i for i in range(24 * 60 / interval )]
timelabels = []
# Build one label per time bin, based on the requested interval
for i in range(24 * 60 / interval):
    minutes = i * interval
    timelabels.append('%d:%02d' % (minutes / 60, minutes % 60))
for t in times:
timebins[np.floor((t-start_date).days)][ np.floor(np.divide(t.hour*60+t.minute, interval)) ] += 1
meandat = np.zeros(len(timebins[0]))
ignoredDays = 0
ind = 0
cm = plt.get_cmap('jet',len(timebins))
dats = []
for dat in timebins.values():
if np.sum(dat)==0:
ignoredDays += 1
else:
tmp, = ax.plot( timeaxis, dat, marker='s', linestyle='-.',alpha=0.5, color=cm(ind) )
dats.append(tmp)
meandat += dat
ind += 1
meandat = np.divide(meandat, daycount-ignoredDays)
tmp, = ax.plot( timeaxis, meandat, color='k', linewidth=4, linestyle='-')
dats.append(tmp)
daylabels.append('Mean')
ax.set_xticks( timeaxis )
ax.set_xticklabels( timelabels )
xl = ax.get_xticklabels()
plt.setp(xl, rotation=30, fontsize=10)
yl = ax.get_yticklabels()
plt.setp(yl, fontsize=10)
ax.set_ylabel('Events',fontsize=10)
ax.set_xlim( 8 * 60 / interval, 19 * 60 / interval )
ax.legend(dats,daylabels,loc=2,frameon=False)
return ax
| agpl-3.0 |
IshankGulati/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
herilalaina/scikit-learn | examples/gaussian_process/plot_gpc.py | 49 | 3995 | """
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerably larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad).
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
% (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
% (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors
plt.figure(0)
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data",
edgecolors=(0, 0, 0))
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data",
edgecolors=(0, 0, 0))
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure(1)
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/frame/methods/test_head_tail.py | 6 | 1911 | import numpy as np
from pandas import DataFrame
import pandas._testing as tm
def test_head_tail_generic(index, frame_or_series):
# GH#5370
ndim = 2 if frame_or_series is DataFrame else 1
shape = (len(index),) * ndim
vals = np.random.randn(*shape)
obj = frame_or_series(vals, index=index)
tm.assert_equal(obj.head(), obj.iloc[:5])
tm.assert_equal(obj.tail(), obj.iloc[-5:])
# 0-len
tm.assert_equal(obj.head(0), obj.iloc[0:0])
tm.assert_equal(obj.tail(0), obj.iloc[0:0])
# bounded
tm.assert_equal(obj.head(len(obj) + 1), obj)
tm.assert_equal(obj.tail(len(obj) + 1), obj)
# neg index
tm.assert_equal(obj.head(-3), obj.head(len(index) - 3))
tm.assert_equal(obj.tail(-3), obj.tail(len(index) - 3))
def test_head_tail(float_frame):
tm.assert_frame_equal(float_frame.head(), float_frame[:5])
tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
tm.assert_frame_equal(float_frame.head(0), float_frame[0:0])
tm.assert_frame_equal(float_frame.tail(0), float_frame[0:0])
tm.assert_frame_equal(float_frame.head(-1), float_frame[:-1])
tm.assert_frame_equal(float_frame.tail(-1), float_frame[1:])
tm.assert_frame_equal(float_frame.head(1), float_frame[:1])
tm.assert_frame_equal(float_frame.tail(1), float_frame[-1:])
# with a float index
df = float_frame.copy()
df.index = np.arange(len(float_frame)) + 0.1
tm.assert_frame_equal(df.head(), df.iloc[:5])
tm.assert_frame_equal(df.tail(), df.iloc[-5:])
tm.assert_frame_equal(df.head(0), df[0:0])
tm.assert_frame_equal(df.tail(0), df[0:0])
tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
def test_head_tail_empty():
# test empty dataframe
empty_df = DataFrame()
tm.assert_frame_equal(empty_df.tail(), empty_df)
tm.assert_frame_equal(empty_df.head(), empty_df)
| bsd-3-clause |
Winand/pandas | pandas/tests/plotting/test_boxplot_method.py | 4 | 16251 | # coding: utf-8
import pytest
import itertools
import string
from distutils.version import LooseVersion
from pandas import Series, DataFrame, MultiIndex
from pandas.compat import range, lzip
import pandas.util.testing as tm
import numpy as np
from numpy import random
from numpy.random import randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works)
""" Test cases for .boxplot method """
tm._skip_if_no_mpl()
def _skip_if_mpl_14_or_dev_boxplot():
# GH 8382
# Boxplot failures on 1.4 and 1.4.1
# Don't need try / except since that's done at class level
import matplotlib
if str(matplotlib.__version__) >= LooseVersion('1.4'):
pytest.skip("Matplotlib Regression in 1.4 and current dev.")
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot, return_type='dict')
_check_plot_works(df.boxplot, column=[
'one', 'two'], return_type='dict')
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, column=['one', 'two'],
by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by='indic')
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
_check_plot_works(plotting._core.boxplot, data=df['one'],
return_type='dict')
_check_plot_works(df.boxplot, notch=1, return_type='dict')
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by='indic', notch=1)
df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
df['Y'] = Series(['A'] * 10)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by='X')
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot('Col1', by='X', ax=ax)
ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes()
assert ax_axes is axes
fig, ax = self.plt.subplots()
axes = df.groupby('Y').boxplot(ax=ax, return_type='axes')
ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes()
assert ax_axes is axes['A']
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
with tm.assert_produces_warning(UserWarning):
axes = df.boxplot(column=['Col1', 'Col2'],
by='X', ax=ax, return_type='axes')
assert axes['Col1'].get_figure() is fig
# When by is None, check that all relevant lines are present in the
# dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type='dict')
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
@pytest.mark.slow
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
assert isinstance(result, self.plt.Axes)
@pytest.mark.slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with pytest.raises(ValueError):
df.boxplot(return_type='NOTATYPE')
result = df.boxplot()
self._check_box_return_type(result, 'axes')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='dict')
self._check_box_return_type(result, 'dict')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='axes')
self._check_box_return_type(result, 'axes')
with tm.assert_produces_warning(False):
result = df.boxplot(return_type='both')
self._check_box_return_type(result, 'both')
@pytest.mark.slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
assert y_min <= col.min()
assert y_max >= col.max()
df = self.hist_df.copy()
df['age'] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(['height', 'weight'], by='category')
_check_ax_limits(df['height'], height_ax)
_check_ax_limits(df['weight'], weight_ax)
assert weight_ax._sharey == height_ax
# Two rows, one partial
p = df.boxplot(['height', 'weight', 'age'], by='category')
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df['height'], height_ax)
_check_ax_limits(df['weight'], weight_ax)
_check_ax_limits(df['age'], age_ax)
assert weight_ax._sharey == height_ax
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
@pytest.mark.slow
def test_boxplot_empty_column(self):
_skip_if_mpl_14_or_dev_boxplot()
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type='axes')
@pytest.mark.slow
def test_figsize(self):
df = DataFrame(np.random.rand(10, 5),
columns=['A', 'B', 'C', 'D', 'E'])
result = df.boxplot(return_type='axes', figsize=(12, 8))
assert result.figure.bbox_inches.width == 12
assert result.figure.bbox_inches.height == 8
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
self._check_ticks_props(df.boxplot("a", fontsize=16),
xlabelsize=16, ylabelsize=16)
class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy(self):
grouped = self.hist_df.groupby(by='gender')
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
grouped = df.groupby(level=1)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
grouped = df.unstack(level=1).groupby(level=0, axis=1)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type='axes')
self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False,
return_type='axes')
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_grouped_plot_fignums(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(['male', 'female'], size=n)
df = DataFrame({'height': height, 'weight': weight, 'gender': gender})
gb = df.groupby('gender')
res = gb.plot()
assert len(self.plt.get_fignums()) == 2
assert len(res) == 2
tm.close()
res = gb.boxplot(return_type='axes')
assert len(self.plt.get_fignums()) == 1
assert len(res) == 2
tm.close()
# now works with GH 5610 as gender is excluded
res = df.groupby('gender').hist()
tm.close()
@pytest.mark.slow
def test_grouped_box_return_type(self):
df = self.hist_df
# old style: return_type=None
result = df.boxplot(by='gender')
assert isinstance(result, np.ndarray)
self._check_box_return_type(
result, None,
expected_keys=['height', 'weight', 'category'])
# now for groupby
result = df.groupby('gender').boxplot(return_type='dict')
self._check_box_return_type(
result, 'dict', expected_keys=['Male', 'Female'])
columns2 = 'X B C D A G Y N Q O'.split()
df2 = DataFrame(random.randn(50, 10), columns=columns2)
categories2 = 'A B C D E F G H I J'.split()
df2['category'] = categories2 * 5
for t in ['dict', 'axes', 'both']:
returned = df.groupby('classroom').boxplot(return_type=t)
self._check_box_return_type(
returned, t, expected_keys=['A', 'B', 'C'])
returned = df.boxplot(by='classroom', return_type=t)
self._check_box_return_type(
returned, t,
expected_keys=['height', 'weight', 'category'])
returned = df2.groupby('category').boxplot(return_type=t)
self._check_box_return_type(returned, t, expected_keys=categories2)
returned = df2.boxplot(by='category', return_type=t)
self._check_box_return_type(returned, t, expected_keys=columns2)
@pytest.mark.slow
def test_grouped_box_layout(self):
df = self.hist_df
pytest.raises(ValueError, df.boxplot, column=['weight', 'height'],
by=df.gender, layout=(1, 1))
pytest.raises(ValueError, df.boxplot,
column=['height', 'weight', 'category'],
layout=(2, 1), return_type='dict')
pytest.raises(ValueError, df.boxplot, column=['weight', 'height'],
by=df.gender, layout=(-1, -1))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('gender').boxplot,
column='height', return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('category').boxplot,
column='height',
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
# GH 6769
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('classroom').boxplot,
column='height', return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
# GH 5897
axes = df.boxplot(column=['height', 'weight', 'category'], by='gender',
return_type='axes')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
for ax in [axes['height']]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes['weight'], axes['category']]:
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
box = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'], return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('category').boxplot,
column='height',
layout=(3, 2), return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
box = _check_plot_works(df.groupby('category').boxplot,
column='height',
layout=(3, -1), return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
layout=(4, 1))
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
layout=(-1, 1))
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
box = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'], layout=(1, 4),
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
box = df.groupby('classroom').boxplot( # noqa
column=['height', 'weight', 'category'], layout=(1, -1),
return_type='dict')
self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
@pytest.mark.slow
def test_grouped_box_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
# check warning to ignore sharex / sharey
# this check should be done in the first function which
# passes multiple axes to plot, hist or boxplot
# location should be changed if other test is added
# which has earlier alphabetical order
with tm.assert_produces_warning(UserWarning):
fig, axes = self.plt.subplots(2, 2)
df.groupby('category').boxplot(
column='height', return_type='axes', ax=axes)
self._check_axes_shape(self.plt.gcf().axes,
axes_num=4, layout=(2, 2))
fig, axes = self.plt.subplots(2, 3)
with tm.assert_produces_warning(UserWarning):
returned = df.boxplot(column=['height', 'weight', 'category'],
by='gender', return_type='axes', ax=axes[0])
returned = np.array(list(returned.values))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
# draw on second row
with tm.assert_produces_warning(UserWarning):
returned = df.groupby('classroom').boxplot(
column=['height', 'weight', 'category'],
return_type='axes', ax=axes[1])
returned = np.array(list(returned.values))
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
with tm.assert_produces_warning(UserWarning):
axes = df.groupby('classroom').boxplot(ax=axes)
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
self._check_ticks_props(df.boxplot("a", by="b", fontsize=16),
xlabelsize=16, ylabelsize=16)
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | sklearn/cluster/mean_shift_.py | 21 | 13948 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
Note that this function takes time at least quadratic in n_samples. For large
datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
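Examples
--------
An illustrative sketch (the data below is an arbitrary assumption; any
2-D array-like input works):
>>> import numpy as np
>>> from sklearn.cluster import estimate_bandwidth
>>> X = np.random.RandomState(0).randn(100, 2)
>>> estimate_bandwidth(X, quantile=0.3) > 0
True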
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if it has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
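Examples
--------
A small illustrative sketch: two well-separated groups of points, so a
bandwidth of 2 recovers two clusters (the data is an arbitrary assumption).
>>> import numpy as np
>>> from sklearn.cluster import MeanShift
>>> X = np.array([[1, 1], [2, 1], [1, 0],
...               [4, 7], [3, 5], [3, 6]])
>>> ms = MeanShift(bandwidth=2).fit(X)
>>> ms.cluster_centers_.shape
(2, 2)
>>> ms.labels_.shape
(6,)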
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
xyguo/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
liangz0707/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
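# One possible solution sketch for the task above; the vectorizer
# thresholds and the classifier C value are illustrative assumptions.
pipeline = Pipeline([
    ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    ('clf', LinearSVC(C=1000)),
])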
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
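# Possible sketch: compare unigrams against unigrams + bigrams, then fit
# the grid search on the training documents.
parameters = {
    'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)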
# TASK: print the cross-validated scores for each parameter set
# explored by the grid search
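# Possible sketch, using the grid_scores_ attribute of the fitted
# sklearn.grid_search.GridSearchCV object:
for params, mean_score, scores in grid_search.grid_scores_:
    print("%0.3f (+/-%0.03f) for %r"
          % (mean_score, scores.std() * 2, params))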
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
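# Possible sketch: the fitted grid search predicts with its best estimator.
y_predicted = grid_search.predict(docs_test)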
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Dynamic_Shear_Behaviour/Frictional_SDOF_With_Damping/c_t_1/ElPPlShear/Displacement_Response.py | 12 | 2048 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Displacement.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][24,:]
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(times,disp,'-r',label='Analytical Solution', linewidth=4)
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Frictional_SDOF_freeVibration.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][24,:]
# Plot the figure. Add labels and titles.
plt.plot(times,disp,'-k',label='Numerical Solution', linewidth=4)
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Displacement_Response.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
MaxInGaussian/ZS-VAFNN | demo/training_graph.py | 1 | 1902 | ''' training_graph.py '''
import numpy as np
import tensorflow as tf
from SGPA_graph import build_SGPA_graph
from sklearn.neighbors import KernelDensity
## Computation graph for training
def build_training_graph(layers_width, n_basis, learn_rate, Y_std):
n_samples = tf.placeholder(tf.int32, shape=())
X = tf.placeholder(tf.float32, shape=[None, layers_width[0]])
Y = tf.placeholder(tf.float32, shape=[None, layers_width[-1]])
## Computation graph for optimization
F, KL = build_SGPA_graph(X, layers_width, n_samples, n_basis)
# F_mean, F_variance = tf.nn.moments(F, axes=[0])
F_mean = tf.reduce_mean(F, 0)
F_variance = tf.reduce_mean((F-tf.expand_dims(F_mean, 0))**2, 0)
# noise = tf.get_variable('noise', shape=(),
# initializer=tf.constant_initializer(0.2))
F_variance += F_mean**2
# obj = tf.log(noise)+tf.reduce_mean((Y-F_mean)**2)/noise+KL
obj = tf.losses.mean_squared_error(F_mean, Y)
global_step = tf.Variable(0, trainable=False)
learn_rate_ts = tf.train.exponential_decay(
learn_rate, global_step, 10000, 0.96, staircase=True)
optimizer = tf.train.AdadeltaOptimizer(learn_rate_ts)
infer_op = optimizer.minimize(obj, global_step=global_step)
## Computation graph for saving the best set of parameters
save_vars = []
for var in tf.trainable_variables():
save_vars.append(tf.Variable(var.initialized_value()))
assign_to_save, assign_to_restore = [], []
for var, save_var in zip(tf.trainable_variables(), save_vars):
assign_to_save.append(save_var.assign(var))
assign_to_restore.append(var.assign(save_var))
## Computation graph for evaluation
rmse = tf.sqrt(tf.losses.mean_squared_error(F_mean*Y_std, Y*Y_std))
nlpd = .5*tf.reduce_mean(tf.log(F_variance*Y_std**2.)+\
(F_mean-Y)**2/F_variance)+.5*np.log(2*np.pi)
return locals()
| apache-2.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/markers.py | 2 | 31311 | """
This module contains functions to handle markers. Used by both the
marker functionality of `~matplotlib.axes.Axes.plot` and
`~matplotlib.axes.Axes.scatter`.
All possible markers are defined here:
============================== ===============================================
marker description
============================== ===============================================
`"."` point
`","` pixel
`"o"` circle
`"v"` triangle_down
`"^"` triangle_up
`"<"` triangle_left
`">"` triangle_right
`"1"` tri_down
`"2"` tri_up
`"3"` tri_left
`"4"` tri_right
`"8"` octagon
`"s"` square
`"p"` pentagon
`"P"` plus (filled)
`"*"` star
`"h"` hexagon1
`"H"` hexagon2
`"+"` plus
`"x"` x
`"X"` x (filled)
`"D"` diamond
`"d"` thin_diamond
`"|"` vline
`"_"` hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft (centered at tip)
CARETRIGHT caretright (centered at tip)
CARETUP caretup (centered at tip)
CARETDOWN caretdown (centered at tip)
CARETLEFTBASE caretleft (centered at base)
CARETRIGHTBASE caretright (centered at base)
CARETUPBASE caretup (centered at base)
`"None"`, `" "` or `""` nothing
``'$...$'`` render the string using mathtext.
`verts` a list of (x, y) pairs used for Path vertices.
The center of the marker is located at (0,0) and
the size is normalized.
path a `~matplotlib.path.Path` instance.
(`numsides`, `style`, `angle`) The marker can also be a tuple (`numsides`,
`style`, `angle`), which will create a custom,
regular symbol.
`numsides`:
the number of sides
`style`:
the style of the regular symbol:
0
a regular polygon
1
a star-like symbol
2
an asterisk
3
a circle (`numsides` and `angle` is
ignored)
`angle`:
the angle of rotation of the symbol
============================== ===============================================
For backward compatibility, the form (`verts`, 0) is also accepted,
but it is equivalent to just `verts` for giving a raw set of vertices
that define the shape.
`None` is the default which means 'nothing'; however, this table is
referred to from other docs for the valid marker inputs, and in
those cases `None` still means 'default'.
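Examples
--------
A minimal, illustrative sketch of passing markers to ``plot`` (the specific
marker choices below are arbitrary picks from the table above)::
    import matplotlib.pyplot as plt
    xs = range(5)
    plt.plot(xs, xs, marker='o', linestyle='')             # circle marker
    plt.plot(xs, [x + 1 for x in xs], marker=(5, 1, 0))    # 5-sided star symbol
    plt.plot(xs, [x + 2 for x in xs], marker='$f$')        # mathtext marker
    plt.show()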
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from collections import Sized
import numpy as np
from . import rcParams
from .cbook import is_math_text, is_numlike
from .path import Path
from .transforms import IdentityTransform, Affine2D
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE) = xrange(12)
_empty_path = Path(np.empty((0, 2)))
class MarkerStyle(object):
markers = {
'.': 'point',
',': 'pixel',
'o': 'circle',
'v': 'triangle_down',
'^': 'triangle_up',
'<': 'triangle_left',
'>': 'triangle_right',
'1': 'tri_down',
'2': 'tri_up',
'3': 'tri_left',
'4': 'tri_right',
'8': 'octagon',
's': 'square',
'p': 'pentagon',
'*': 'star',
'h': 'hexagon1',
'H': 'hexagon2',
'+': 'plus',
'x': 'x',
'D': 'diamond',
'd': 'thin_diamond',
'|': 'vline',
'_': 'hline',
'P': 'plus_filled',
'X': 'x_filled',
TICKLEFT: 'tickleft',
TICKRIGHT: 'tickright',
TICKUP: 'tickup',
TICKDOWN: 'tickdown',
CARETLEFT: 'caretleft',
CARETRIGHT: 'caretright',
CARETUP: 'caretup',
CARETDOWN: 'caretdown',
CARETLEFTBASE: 'caretleftbase',
CARETRIGHTBASE: 'caretrightbase',
CARETUPBASE: 'caretupbase',
CARETDOWNBASE: 'caretdownbase',
"None": 'nothing',
None: 'nothing',
' ': 'nothing',
'': 'nothing'
}
# Just used for informational purposes. is_filled()
# is calculated in the _set_* functions.
filled_markers = (
'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd',
'P', 'X')
fillstyles = ('full', 'left', 'right', 'bottom', 'top', 'none')
_half_fillstyles = ('left', 'right', 'bottom', 'top')
# TODO: Is this ever used as a non-constant?
_point_size_reduction = 0.5
def __init__(self, marker=None, fillstyle=None):
"""
MarkerStyle
Attributes
----------
markers : list of known markers
fillstyles : list of known fillstyles
filled_markers : list of known filled markers.
Parameters
----------
marker : string or array_like, optional, default: None
See the descriptions of possible markers in the module docstring.
fillstyle : string, optional, default: 'full'
'full', 'left", 'right', 'bottom', 'top', 'none'
"""
self._marker_function = None
self.set_fillstyle(fillstyle)
self.set_marker(marker)
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_marker_function')
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
self.set_marker(self._marker)
def _recache(self):
if self._marker_function is None:
return
self._path = _empty_path
self._transform = IdentityTransform()
self._alt_path = None
self._alt_transform = None
self._snap_threshold = None
self._joinstyle = 'round'
self._capstyle = 'butt'
self._filled = True
self._marker_function()
if six.PY3:
def __bool__(self):
return bool(len(self._path.vertices))
else:
def __nonzero__(self):
return bool(len(self._path.vertices))
def is_filled(self):
return self._filled
def get_fillstyle(self):
return self._fillstyle
def set_fillstyle(self, fillstyle):
"""
Sets fillstyle
Parameters
----------
fillstyle : string amongst known fillstyles
"""
if fillstyle is None:
fillstyle = rcParams['markers.fillstyle']
if fillstyle not in self.fillstyles:
raise ValueError("Unrecognized fillstyle %s"
% ' '.join(self.fillstyles))
self._fillstyle = fillstyle
self._recache()
def get_joinstyle(self):
return self._joinstyle
def get_capstyle(self):
return self._capstyle
def get_marker(self):
return self._marker
def set_marker(self, marker):
if (isinstance(marker, np.ndarray) and marker.ndim == 2 and
marker.shape[1] == 2):
self._marker_function = self._set_vertices
elif (isinstance(marker, Sized) and len(marker) in (2, 3) and
marker[1] in (0, 1, 2, 3)):
self._marker_function = self._set_tuple_marker
elif (not isinstance(marker, (np.ndarray, list)) and
marker in self.markers):
self._marker_function = getattr(
self, '_set_' + self.markers[marker])
elif isinstance(marker, six.string_types) and is_math_text(marker):
self._marker_function = self._set_mathtext_path
elif isinstance(marker, Path):
self._marker_function = self._set_path_marker
else:
try:
Path(marker)
self._marker_function = self._set_vertices
except ValueError:
raise ValueError('Unrecognized marker style'
' {0}'.format(marker))
self._marker = marker
self._recache()
def get_path(self):
return self._path
def get_transform(self):
return self._transform.frozen()
def get_alt_path(self):
return self._alt_path
def get_alt_transform(self):
return self._alt_transform.frozen()
def get_snap_threshold(self):
return self._snap_threshold
def _set_nothing(self):
self._filled = False
def _set_custom_marker(self, path):
verts = path.vertices
rescale = max(np.max(np.abs(verts[:, 0])),
np.max(np.abs(verts[:, 1])))
self._transform = Affine2D().scale(0.5 / rescale)
self._path = path
def _set_path_marker(self):
self._set_custom_marker(self._marker)
def _set_vertices(self):
verts = self._marker
marker = Path(verts)
self._set_custom_marker(marker)
def _set_tuple_marker(self):
marker = self._marker
if is_numlike(marker[0]):
if len(marker) == 2:
numsides, rotation = marker[0], 0.0
elif len(marker) == 3:
numsides, rotation = marker[0], marker[2]
symstyle = marker[1]
if symstyle == 0:
self._path = Path.unit_regular_polygon(numsides)
self._joinstyle = 'miter'
elif symstyle == 1:
self._path = Path.unit_regular_star(numsides)
self._joinstyle = 'bevel'
elif symstyle == 2:
self._path = Path.unit_regular_asterisk(numsides)
self._filled = False
self._joinstyle = 'bevel'
elif symstyle == 3:
self._path = Path.unit_circle()
self._transform = Affine2D().scale(0.5).rotate_deg(rotation)
else:
verts = np.asarray(marker[0])
path = Path(verts)
self._set_custom_marker(path)
def _set_mathtext_path(self):
"""
Draws mathtext markers '$...$' using TextPath object.
Submitted by tcb
"""
from matplotlib.text import TextPath
from matplotlib.font_manager import FontProperties
# again, the properties could be initialised just once outside
# this function
# Font size is irrelevant here, it will be rescaled based on
# the drawn size later
props = FontProperties(size=1.0)
text = TextPath(xy=(0, 0), s=self.get_marker(), fontproperties=props,
usetex=rcParams['text.usetex'])
if len(text.vertices) == 0:
return
xmin, ymin = text.vertices.min(axis=0)
xmax, ymax = text.vertices.max(axis=0)
width = xmax - xmin
height = ymax - ymin
max_dim = max(width, height)
self._transform = Affine2D() \
.translate(-xmin + 0.5 * -width, -ymin + 0.5 * -height) \
.scale(1.0 / max_dim)
self._path = text
self._snap = False
def _half_fill(self):
fs = self.get_fillstyle()
result = fs in self._half_fillstyles
return result
def _set_circle(self, reduction=1.0):
self._transform = Affine2D().scale(0.5 * reduction)
self._snap_threshold = np.inf
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_circle()
else:
# build a right-half circle
if fs == 'bottom':
rotate = 270.
elif fs == 'top':
rotate = 90.
elif fs == 'left':
rotate = 180.
else:
rotate = 0.
self._path = self._alt_path = Path.unit_circle_righthalf()
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform.frozen().rotate_deg(180.)
def _set_pixel(self):
self._path = Path.unit_rectangle()
# Ideally, you'd want -0.5, -0.5 here, but then the snapping
# algorithm in the Agg backend will round this to a 2x2
# rectangle from (-1, -1) to (1, 1). By offsetting it
# slightly, we can force it to be (0, 0) to (1, 1), which both
# makes it only be a single pixel and places it correctly
# aligned to 1-width stroking (i.e. the ticks). This hack is
# the best of a number of bad alternatives, mainly because the
# backends are not aware of what marker is actually being used
# beyond just its path data.
self._transform = Affine2D().translate(-0.49999, -0.49999)
self._snap_threshold = None
def _set_point(self):
self._set_circle(reduction=self._point_size_reduction)
_triangle_path = Path(
[[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
# Going down halfway looks too small. Golden ratio is too far.
_triangle_path_u = Path(
[[0.0, 1.0], [-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_d = Path(
[[-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [1.0, -1.0], [-1.0, -1.0],
[-3 / 5., -1 / 5.]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_l = Path(
[[0.0, 1.0], [0.0, -1.0], [-1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_r = Path(
[[0.0, 1.0], [0.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
def _set_triangle(self, rot, skip):
self._transform = Affine2D().scale(0.5, 0.5).rotate_deg(rot)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = self._triangle_path
else:
mpaths = [self._triangle_path_u,
self._triangle_path_l,
self._triangle_path_d,
self._triangle_path_r]
if fs == 'top':
self._path = mpaths[(0 + skip) % 4]
self._alt_path = mpaths[(2 + skip) % 4]
elif fs == 'bottom':
self._path = mpaths[(2 + skip) % 4]
self._alt_path = mpaths[(0 + skip) % 4]
elif fs == 'left':
self._path = mpaths[(1 + skip) % 4]
self._alt_path = mpaths[(3 + skip) % 4]
else:
self._path = mpaths[(3 + skip) % 4]
self._alt_path = mpaths[(1 + skip) % 4]
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_triangle_up(self):
return self._set_triangle(0.0, 0)
def _set_triangle_down(self):
return self._set_triangle(180.0, 2)
def _set_triangle_left(self):
return self._set_triangle(90.0, 3)
def _set_triangle_right(self):
return self._set_triangle(270.0, 1)
def _set_square(self):
self._transform = Affine2D().translate(-0.5, -0.5)
self._snap_threshold = 2.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_rectangle()
else:
# build a bottom filled square out of two rectangles, one
# filled. Use the rotation to support left, right, bottom
# or top
if fs == 'bottom':
rotate = 0.
elif fs == 'top':
rotate = 180.
elif fs == 'left':
rotate = 270.
else:
rotate = 90.
self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 0.5],
[0.0, 0.5], [0.0, 0.0]])
self._alt_path = Path([[0.0, 0.5], [1.0, 0.5], [1.0, 1.0],
[0.0, 1.0], [0.0, 0.5]])
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_diamond(self):
self._transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_rectangle()
else:
self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
self._alt_path = Path([[0.0, 0.0], [0.0, 1.0],
[1.0, 1.0], [0.0, 0.0]])
if fs == 'bottom':
rotate = 270.
elif fs == 'top':
rotate = 90.
elif fs == 'left':
rotate = 180.
else:
rotate = 0.
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_thin_diamond(self):
self._set_diamond()
self._transform.scale(0.6, 1.0)
def _set_pentagon(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
polypath = Path.unit_regular_polygon(5)
fs = self.get_fillstyle()
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
y = (1 + np.sqrt(5)) / 4.
top = Path([verts[0], verts[1], verts[4], verts[0]])
bottom = Path([verts[1], verts[2], verts[3], verts[4], verts[1]])
left = Path([verts[0], verts[1], verts[2], [0, -y], verts[0]])
right = Path([verts[0], verts[4], verts[3], [0, -y], verts[0]])
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_star(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
polypath = Path.unit_regular_star(5, innerCircle=0.381966)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
top = Path(np.vstack((verts[0:4, :], verts[7:10, :], verts[0])))
bottom = Path(np.vstack((verts[3:8, :], verts[3])))
left = Path(np.vstack((verts[0:6, :], verts[0])))
right = Path(np.vstack((verts[0], verts[5:10, :], verts[0])))
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'bevel'
def _set_hexagon1(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = None
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(6)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
# not drawing inside lines
x = np.abs(np.cos(5 * np.pi / 6.))
top = Path(np.vstack(([-x, 0], verts[(1, 0, 5), :], [x, 0])))
bottom = Path(np.vstack(([-x, 0], verts[2:5, :], [x, 0])))
left = Path(verts[(0, 1, 2, 3), :])
right = Path(verts[(0, 5, 4, 3), :])
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_hexagon2(self):
self._transform = Affine2D().scale(0.5).rotate_deg(30)
self._snap_threshold = None
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(6)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
# not drawing inside lines
x, y = np.sqrt(3) / 4, 3 / 4.
top = Path(verts[(1, 0, 5, 4, 1), :])
bottom = Path(verts[(1, 2, 3, 4), :])
left = Path(np.vstack(([x, y], verts[(0, 1, 2), :],
[-x, -y], [x, y])))
right = Path(np.vstack(([x, y], verts[(5, 4, 3), :], [-x, -y])))
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_octagon(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(8)
if not self._half_fill():
self._transform.rotate_deg(22.5)
self._path = polypath
else:
x = np.sqrt(2.) / 4.
half = Path([[0, -1], [0, 1], [-x, 1], [-1, x],
[-1, -x], [-x, -1], [0, -1]])
if fs == 'bottom':
rotate = 90.
elif fs == 'top':
rotate = 270.
elif fs == 'right':
rotate = 180.
else:
rotate = 0.
self._transform.rotate_deg(rotate)
self._path = self._alt_path = half
self._alt_transform = self._transform.frozen().rotate_deg(180.0)
self._joinstyle = 'miter'
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _set_vline(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 1.0
self._filled = False
self._path = self._line_marker_path
def _set_hline(self):
self._set_vline()
self._transform = self._transform.rotate_deg(90)
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _set_tickleft(self):
self._transform = Affine2D().scale(-1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickhoriz_path
def _set_tickright(self):
self._transform = Affine2D().scale(1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickhoriz_path
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _set_tickup(self):
self._transform = Affine2D().scale(1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickvert_path
def _set_tickdown(self):
self._transform = Affine2D().scale(1.0, -1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickvert_path
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_tri_down(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_up(self):
self._set_tri_down()
self._transform = self._transform.rotate_deg(180)
def _set_tri_left(self):
self._set_tri_down()
self._transform = self._transform.rotate_deg(270)
def _set_tri_right(self):
self._set_tri_down()
self._transform = self._transform.rotate_deg(90)
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _set_caretdown(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretup(self):
self._set_caretdown()
self._transform = self._transform.rotate_deg(180)
def _set_caretleft(self):
self._set_caretdown()
self._transform = self._transform.rotate_deg(270)
def _set_caretright(self):
self._set_caretdown()
self._transform = self._transform.rotate_deg(90)
_caret_path_base = Path([[-1.0, 0.0], [0.0, -1.5], [1.0, 0]])
def _set_caretdownbase(self):
self._set_caretdown()
self._path = self._caret_path_base
def _set_caretupbase(self):
self._set_caretdownbase()
self._transform = self._transform.rotate_deg(180)
def _set_caretleftbase(self):
self._set_caretdownbase()
self._transform = self._transform.rotate_deg(270)
def _set_caretrightbase(self):
self._set_caretdownbase()
self._transform = self._transform.rotate_deg(90)
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_plus(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 1.0
self._filled = False
self._path = self._plus_path
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_x(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._x_path
_plus_filled_path = Path([(1/3, 0), (2/3, 0), (2/3, 1/3),
(1, 1/3), (1, 2/3), (2/3, 2/3),
(2/3, 1), (1/3, 1), (1/3, 2/3),
(0, 2/3), (0, 1/3), (1/3, 1/3),
(1/3, 0)],
[Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
_plus_filled_path_t = Path([(1, 1/2), (1, 2/3), (2/3, 2/3),
(2/3, 1), (1/3, 1), (1/3, 2/3),
(0, 2/3), (0, 1/2), (1, 1/2)],
[Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
def _set_plus_filled(self):
self._transform = Affine2D().translate(-0.5, -0.5)
self._snap_threshold = 5.0
self._joinstyle = 'miter'
fs = self.get_fillstyle()
if not self._half_fill():
self._path = self._plus_filled_path
else:
# Rotate top half path to support all partitions
if fs == 'top':
rotate, rotate_alt = 0, 180
elif fs == 'bottom':
rotate, rotate_alt = 180, 0
elif fs == 'left':
rotate, rotate_alt = 90, 270
else:
rotate, rotate_alt = 270, 90
self._path = self._plus_filled_path_t
self._alt_path = self._plus_filled_path_t
self._alt_transform = Affine2D().translate(-0.5, -0.5)
self._transform.rotate_deg(rotate)
self._alt_transform.rotate_deg(rotate_alt)
_x_filled_path = Path([(0.25, 0), (0.5, 0.25), (0.75, 0), (1, 0.25),
(0.75, 0.5), (1, 0.75), (0.75, 1), (0.5, 0.75),
(0.25, 1), (0, 0.75), (0.25, 0.5), (0, 0.25),
(0.25, 0)],
[Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
_x_filled_path_t = Path([(0.75, 0.5), (1, 0.75), (0.75, 1),
(0.5, 0.75), (0.25, 1), (0, 0.75),
(0.25, 0.5), (0.75, 0.5)],
[Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY])
def _set_x_filled(self):
self._transform = Affine2D().translate(-0.5, -0.5)
self._snap_threshold = 5.0
self._joinstyle = 'miter'
fs = self.get_fillstyle()
if not self._half_fill():
self._path = self._x_filled_path
else:
# Rotate top half path to support all partitions
if fs == 'top':
rotate, rotate_alt = 0, 180
elif fs == 'bottom':
rotate, rotate_alt = 180, 0
elif fs == 'left':
rotate, rotate_alt = 90, 270
else:
rotate, rotate_alt = 270, 90
self._path = self._x_filled_path_t
self._alt_path = self._x_filled_path_t
self._alt_transform = Affine2D().translate(-0.5, -0.5)
self._transform.rotate_deg(rotate)
self._alt_transform.rotate_deg(rotate_alt)
| mit |
timzhangau/ml_nano | ud120/regression/finance_regression.py | 7 | 2106 | #!/usr/bin/python
"""
Starter code for the regression mini-project.
Loads up/formats a modified version of the dataset
(why modified? we've removed some trouble points
that you'll find yourself removing in the outliers mini-project).
Draws a little scatterplot of the training/testing data.
You fill in the regression code where indicated:
"""
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
dictionary = pickle.load( open("../final_project/final_project_dataset_modified.pkl", "r") )
### list the features you want to look at--first item in the
### list will be the "target" feature
features_list = ["bonus", "salary"]
data = featureFormat( dictionary, features_list, remove_any_zeroes=True)
target, features = targetFeatureSplit( data )
### training-testing split needed in regression, just like classification
from sklearn.cross_validation import train_test_split
feature_train, feature_test, target_train, target_test = train_test_split(features, target, test_size=0.5, random_state=42)
train_color = "b"
test_color = "b"
### Your regression goes here!
### Please name it reg, so that the plotting code below picks it up and
### plots it correctly. Don't forget to change the test_color above from "b" to
### "r" to differentiate training points from test points.
### draw the scatterplot, with color-coded training and testing points
import matplotlib.pyplot as plt
for feature, target in zip(feature_test, target_test):
plt.scatter( feature, target, color=test_color )
for feature, target in zip(feature_train, target_train):
plt.scatter( feature, target, color=train_color )
### labels for the legend
plt.scatter(feature_test[0], target_test[0], color=test_color, label="test")
plt.scatter(feature_train[0], target_train[0], color=train_color, label="train")
### draw the regression line, once it's coded
try:
plt.plot( feature_test, reg.predict(feature_test) )
except NameError:
pass
plt.xlabel(features_list[1])
plt.ylabel(features_list[0])
plt.legend()
plt.show()
| mit |
otmaneJai/Zipline | zipline/finance/risk/period.py | 7 | 11932 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from six import iteritems
import pandas as pd
from . import risk
from . risk import (
alpha,
check_entry,
downside_risk,
information_ratio,
sharpe_ratio,
sortino_ratio,
)
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns, env,
benchmark_returns=None, algorithm_leverages=None):
self.env = env
treasury_curves = env.treasury_curves
if treasury_curves.index[-1] >= start_date:
mask = ((treasury_curves.index >= start_date) &
(treasury_curves.index <= end_date))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self.start_date = start_date
self.end_date = end_date
if benchmark_returns is None:
br = env.benchmark_returns
benchmark_returns = br[(br.index >= returns.index[0]) &
(br.index <= returns.index[-1])]
self.algorithm_returns = self.mask_returns_to_period(returns,
env)
self.benchmark_returns = self.mask_returns_to_period(benchmark_returns,
env)
self.algorithm_leverages = algorithm_leverages
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
self.calculate_period_returns(self.benchmark_returns)
self.algorithm_period_returns = \
self.calculate_period_returns(self.algorithm_returns)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.trading_day_counts = pd.stats.moments.rolling_count(
self.algorithm_returns, self.num_trading_days)
self.mean_algorithm_returns = pd.Series(
index=self.algorithm_returns.index)
for dt, ret in self.algorithm_returns.iteritems():
self.mean_algorithm_returns[dt] = (
self.algorithm_returns[:dt].sum() /
self.trading_day_counts[dt])
self.benchmark_volatility = self.calculate_volatility(
self.benchmark_returns)
self.algorithm_volatility = self.calculate_volatility(
self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date,
self.env,
)
self.sharpe = self.calculate_sharpe()
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(self.sharpe):
self.sharpe = 0.0
self.sortino = self.calculate_sortino()
self.information = self.calculate_information()
self.beta, self.algorithm_covariance, self.benchmark_variance, \
self.condition_number, self.eigen_values = self.calculate_beta()
self.alpha = self.calculate_alpha()
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = self.calculate_max_drawdown()
self.max_leverage = self.calculate_max_leverage()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict whose keys are the risk metric names assembled below.
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
"algorithm_covariance",
"benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"max_leverage",
"algorithm_returns",
"benchmark_returns",
"condition_number",
"eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns, env):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we already have a pandas Series of returns
returns = daily_returns
trade_days = env.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_period_returns(self, returns):
period_returns = (1. + returns).prod() - 1
return period_returns
def calculate_volatility(self, daily_returns):
return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.algorithm_volatility,
self.algorithm_period_returns,
self.treasury_period_return)
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
mar = downside_risk(self.algorithm_returns,
self.mean_algorithm_returns,
self.num_trading_days)
# Hold on to downside risk for debugging purposes.
self.downside_risk = mar
return sortino_ratio(self.algorithm_period_returns,
self.treasury_period_return,
mar)
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(self.algorithm_returns,
self.benchmark_returns)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two days,
# so return none.
if len(self.algorithm_returns) < 2:
return 0.0, 0.0, 0.0, 0.0, []
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
eigen_values = la.eigvals(C)
condition_number = max(eigen_values) / min(eigen_values)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return (
beta,
algorithm_covariance,
benchmark_variance,
condition_number,
eigen_values
)
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.algorithm_period_returns,
self.treasury_period_return,
self.benchmark_period_returns,
self.beta)
def calculate_max_drawdown(self):
compounded_returns = []
cur_return = 0.0
for r in self.algorithm_returns:
try:
cur_return += math.log(1.0 + r)
# this is a guard for a single day returning -100% (or worse): if a
# return is -1.0 or less, math.log raises a ValueError because you
# cannot take the log of zero or a negative number
except ValueError:
log.debug("{cur} return, zeroing the returns".format(
cur=cur_return))
cur_return = 0.0
compounded_returns.append(cur_return)
cur_max = None
max_drawdown = None
for cur in compounded_returns:
if cur_max is None or cur > cur_max:
cur_max = cur
drawdown = (cur - cur_max)
if max_drawdown is None or drawdown < max_drawdown:
max_drawdown = drawdown
if max_drawdown is None:
return 0.0
return 1.0 - math.exp(max_drawdown)
def calculate_max_leverage(self):
if self.algorithm_leverages is None:
return 0.0
else:
return max(self.algorithm_leverages)
def __getstate__(self):
state_dict = {k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("RiskMetricsPeriod saved state \
is too old.")
self.__dict__.update(state)
| apache-2.0 |
scls19fr/pandas-anonymizer | travis_pypi_setup.py | 1 | 3764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'scls19fr/pandas_anonymizer'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
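# Illustrative usage sketch (the repo slug and password below are made up;
# this is not part of the script's CLI flow, which lives in main()):
#
# pubkey = fetch_public_key('someuser/somerepo') # defined just below
# secure_blob = encrypt(pubkey, b'my-pypi-password')
# # secure_blob is the base64 value that update_travis_deploy_password()
# # writes into the deploy section of .travis.yml.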
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will be overwritten'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| isc |
duncanmmacleod/gwpy | gwpy/plot/tests/test_log.py | 2 | 2784 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwpy.plot.log`
"""
try:
from unittest import mock
except ImportError: # python < 3
import mock
import pytest
from matplotlib import (
__version__ as mpl_version,
rc_context,
)
from .. import log as plot_log
class TestLogFormatter(object):
TEST_CLASS = plot_log.LogFormatter
@classmethod
@pytest.fixture
def formatter(cls):
with mock.patch(
"gwpy.plot.log.LogFormatter._num_ticks",
return_value=2,
):
yield cls.TEST_CLASS()
@pytest.mark.parametrize('x, fmt, result, texresult', [
pytest.param(
0.,
None,
r'$\mathdefault{0}$',
'$0$',
id="0",
),
pytest.param(
1,
None,
r'$\mathdefault{10^{0}}$',
r'$\mathdefault{10^{0}}$' if mpl_version > "3.2.1" else "$10^{0}$",
id="fmt=None",
),
pytest.param(
1,
"%s",
r'$\mathdefault{1}$',
r'$1$',
id="fmt=%s",
),
])
def test_call(self, formatter, x, fmt, result, texresult):
with rc_context(rc={'text.usetex': False}):
assert formatter(x, fmt=fmt) == result
with rc_context(rc={'text.usetex': True}):
assert formatter(x, fmt=fmt) == texresult
@mock.patch( # we don't need this function for this test
"gwpy.plot.log.LogFormatter.set_locs",
mock.MagicMock(),
)
@pytest.mark.parametrize("values, result", [
# normal output
pytest.param(
[1e-1, 1e2, 1e5, 1e8],
[plot_log._math(x) for x in
("10^{-1}", "10^{2}", "10^{5}", "10^{8}")],
id="mpl",
),
# custom output
pytest.param(
[1e-1, 1e0, 1e1, 1e2],
[plot_log._math(x) for x in ("0.1", "1", "10", "100")],
id="gwpy",
),
])
def test_format_ticks(self, formatter, values, result):
assert formatter.format_ticks(values) == result
| gpl-3.0 |
pianomania/scikit-learn | sklearn/gaussian_process/kernels.py | 31 | 67169 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
def _check_length_scale(X, length_scale):
length_scale = np.squeeze(length_scale).astype(float)
if np.ndim(length_scale) > 1:
raise ValueError("length_scale cannot be of dimension greater than 1")
if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
raise ValueError("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (length_scale.shape[0], X.shape[1]))
return length_scale
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
.. versionadded:: 0.18
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, whether the
hyperparameter is fixed is derived from the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct, getting rid of the per-instance __dict__; in particular it
# does not copy the string for the keys on each instance.
# Deriving a namedtuple subclass just to introduce the __init__ method
# would also reintroduce the __dict__ on the instance, so instead we tell
# the Python interpreter that this subclass uses static __slots__ rather
# than dynamic attributes. Furthermore, we don't need any additional slot
# in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, six.string_types) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, six.string_types) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other):
return (self.name == other.name and
self.value_type == other.value_type and
np.all(self.bounds == other.bounds) and
self.n_elements == other.n_elements and
self.fixed == other.fixed)
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels.
.. versionadded:: 0.18
"""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr in dir(self):
if attr.startswith("hyperparameter_"):
r.append(getattr(self, attr))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i:i + hyperparameter.n_elements])
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
self.set_params(**params)
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1.
.. versionadded:: 0.18
"""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
.. versionadded:: 0.18
"""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels.
.. versionadded:: 0.18
"""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.k1.n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators.
.. versionadded:: 0.18
"""
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
.. versionadded:: 0.18
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
.. versionadded:: 0.18
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter(
"constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
.. versionadded:: 0.18
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter(
"noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
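# --- Illustrative sketch (not part of the original module) ------------------
# Minimal usage example for WhiteKernel (hypothetical helper, numpy assumed
# as np). On k(X, X) it produces a diagonal noise matrix, while k(X, Y) for a
# distinct Y is identically zero.
def _example_white_kernel():
    X = np.array([[0.0], [1.0]])
    Y = np.array([[0.5], [1.5], [2.5]])
    kernel = WhiteKernel(noise_level=0.1)
    K_xx = kernel(X)     # 0.1 * identity, shape (2, 2)
    K_xy = kernel(X, Y)  # zeros, shape (2, 3)
    return K_xx, K_xy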
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
    k(x_i, x_j) = exp(-1 / 2 * d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
.. versionadded:: 0.18
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each entry
        of length_scale defines the length-scale of the respective feature
        dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter("length_scale", "numeric",
self.length_scale_bounds,
len(self.length_scale))
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0])
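# --- Illustrative sketch (not part of the original module) ------------------
# Minimal usage example contrasting the isotropic and anisotropic RBF variants
# (hypothetical helper, numpy assumed as np).
def _example_rbf_kernel():
    X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])
    iso = RBF(length_scale=1.0)           # one shared length scale
    aniso = RBF(length_scale=[1.0, 3.0])  # one length scale per feature
    K_iso, K_iso_grad = iso(X, eval_gradient=True)        # grad shape (3, 3, 1)
    K_aniso, K_aniso_grad = aniso(X, eval_gradient=True)  # grad shape (3, 3, 2)
    return K_iso, K_iso_grad, K_aniso, K_aniso_grad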
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
    See Rasmussen and Williams 2006, pp. 84 for details regarding the
different variants of the Matern kernel.
.. versionadded:: 0.18
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each entry
        of length_scale defines the length-scale of the respective feature
        dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
    nu : float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (approx. 10 times higher) since they require evaluating the modified
        Bessel function. Furthermore, in contrast to length_scale, nu is kept
        fixed to its initial value and is not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else:
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0],
self.nu)
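# --- Illustrative sketch (not part of the original module) ------------------
# Minimal usage example for the Matern kernel with the three inexpensive
# special cases of nu (hypothetical helper, numpy assumed as np). Other values
# of nu fall back to the modified Bessel function and are much slower.
def _example_matern_kernel():
    X = np.array([[0.0], [1.0], [3.0]])
    kernels = [Matern(length_scale=1.0, nu=nu) for nu in (0.5, 1.5, 2.5)]
    return [k(X) for k in kernels]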
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_alpha(self):
return Hyperparameter("alpha", "numeric", self.alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
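# --- Illustrative sketch (not part of the original module) ------------------
# Minimal usage example for RationalQuadratic (hypothetical helper, numpy
# assumed as np). As alpha grows large the kernel approaches an RBF kernel
# with the same length scale, so the two matrices below should agree closely.
def _example_rational_quadratic():
    X = np.array([[0.0], [1.0], [2.0]])
    rq = RationalQuadratic(length_scale=1.0, alpha=1e6)
    rbf = RBF(length_scale=1.0)
    return rq(X), rbf(X)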
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
    parameter periodicity>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
    k(x_i, x_j) = exp(-2 * (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_periodicity(self):
return Hyperparameter(
"periodicity", "numeric", self.periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
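# --- Illustrative sketch (not part of the original module) ------------------
# Minimal usage example for ExpSineSquared (hypothetical helper, numpy assumed
# as np). Points separated by an integer multiple of the periodicity are
# (numerically) perfectly correlated.
def _example_exp_sine_squared():
    X = np.array([[0.0], [1.0], [2.0], [0.5]])
    kernel = ExpSineSquared(length_scale=1.0, periodicity=1.0)
    K = kernel(X)
    # K[0, 1] and K[0, 2] are ~1.0, while K[0, 3] = exp(-2) is clearly smaller.
    return K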
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting N(0, 1) priors on the coefficients of x_d
    (d = 1, ..., D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
    sigma_0^2 = 0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
.. versionadded:: 0.18
Parameters
----------
sigma_0 : float >= 0, default: 1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
@property
def hyperparameter_sigma_0(self):
return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
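# --- Illustrative sketch (not part of the original module) ------------------
# Minimal usage example for DotProduct (hypothetical helper, numpy assumed as
# np). With sigma_0 = 0 the kernel reduces to the plain inner product.
def _example_dot_product_kernel():
    X = np.array([[1.0, 0.0], [0.0, 2.0]])
    homogeneous = DotProduct(sigma_0=0.0)
    inhomogeneous = DotProduct(sigma_0=1.0)
    K_hom = homogeneous(X)      # equals X.dot(X.T)
    K_inhom = inhomogeneous(X)  # equals X.dot(X.T) + 1.0
    return K_hom, K_inhom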
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
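# --- Illustrative sketch (not part of the original module) ------------------
# Minimal check of the finite-difference helper above (hypothetical helper,
# numpy assumed as np). For f(theta) = theta[0] * M the gradient is simply M.
def _example_approx_fprime():
    M = np.arange(6.0).reshape(2, 3)
    def f(theta):
        return theta[0] * M
    grad = _approx_fprime(np.array([2.0]), f, 1e-8)
    # grad has shape (2, 3, 1) and is numerically close to M[..., np.newaxis]
    return grad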
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
.. versionadded:: 0.18
Parameters
----------
    gamma : float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.metric = metric
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
@property
def hyperparameter_gamma(self):
return Hyperparameter("gamma", "numeric", self.gamma_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X).ravel()
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
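# --- Illustrative sketch (not part of the original module) ------------------
# Minimal usage example for PairwiseKernel (hypothetical helper, numpy assumed
# as np). It wraps the "rbf" metric of sklearn.metrics.pairwise with a
# tunable gamma hyperparameter.
def _example_pairwise_kernel():
    X = np.array([[0.0], [1.0], [2.0]])
    kernel = PairwiseKernel(gamma=0.5, metric="rbf")
    # Equivalent to pairwise_kernels(X, metric="rbf", gamma=0.5)
    return kernel(X)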
| bsd-3-clause |
grigorisg9gr/menpowidgets | menpowidgets/utils.py | 1 | 41578 | from struct import pack as struct_pack
import binascii
import numpy as np
import matplotlib.pyplot as plt
from menpo.compatibility import unicode
from menpo.feature import glyph, sum_channels
def verify_ipython_and_kernel():
r"""
Verify that the current environment is a valid IPython environment that
contains a communication kernel. There is no solid way of identifying a
notebook vs. the QT console, but the QT console will at least visualise the
matplotlib commands without the widget functionality.
Raises
------
ValueError
Unable to import IPython, ipykernel or traitlets
ValueError
No valid IPython kernel found
ValueError
The IPython kernel does not contain a communication manager (the
default IPython shell rather than a notebook or a QT console).
"""
try:
from IPython import get_ipython
from ipykernel.comm import Comm
from traitlets.traitlets import TraitError
if get_ipython() is None:
raise ValueError('menpowidgets can only be used from inside '
'a Jupyter notebook. We were unable to detect '
'an active IPython session. Please re-run your '
'code from inside a Jupyter Notebook.')
try:
Comm()
except TraitError:
raise ValueError('menpowidgets can only be used from inside '
'a Jupyter notebook. We were unable to detect '
'a valid active kernel. Please ensure your code '
'is running inside a Jupyter notebook and not '
'a standalone script or the default IPython '
'shell.')
except ImportError:
raise ValueError('menpowidgets can only be used from inside '
'a Jupyter notebook. We were unable to import '
'IPython, please ensure it is installed.')
def lists_are_the_same(a, b):
r"""
Function that checks if two `lists` have the same elements in the same
order.
Returns
-------
_lists_are_the_same : `bool`
``True`` if the lists are the same.
"""
if len(a) == len(b):
for i, j in zip(a, b):
if i != j:
return False
return True
else:
return False
def rgb2hex(rgb):
return '#' + binascii.hexlify(struct_pack('BBB', *rgb)).decode('ascii')
def decode_colour(colour):
if not (isinstance(colour, str) or isinstance(colour, unicode)):
# we assume that RGB was passed in. Convert it to unicode hex
return rgb2hex(colour)
else:
return str(colour)
def str_is_int(s):
r"""
Function that returns ``True`` if a given `str` is a positive or negative
integer.
Parameters
----------
s : `str`
The command string.
"""
return s.isdigit() or (s.startswith('-') and s[1:].isdigit())
def str_is_float(s):
r"""
Function that returns ``True`` if a given `str` is a positive or negative
float.
Parameters
----------
s : `str`
The command string.
"""
return s.count(".") == 1 and str_is_int(s.replace('.', '', 1))
def parse_int_range_command_with_comma(cmd):
r"""
Function that parses a range/list command for which contains at least one
comma (``,``). The function returns a `list` with the integer values that
are included in the command. It also ignores any redundant whitespaces
that may exist in the command. For example ::
        parse_int_range_command_with_comma("1, 2,-3 ")
    returns ::
        [1, 2, -3]
    Parameters
    ----------
cmd : `str`
The command string.
Returns
-------
cmd_list : `list`
The list of integers.
Raises
------
ValueError
Command cannot start or end with ','.
ValueError
Command cannot contain ',,'.
ValueError
Only integers allowed.
"""
if cmd.startswith(',') or cmd.endswith(','):
# if cmd starts or ends with ',', raise an error
raise ValueError("Command cannot start or end with ','.")
else:
# get the parts in between commas
tmp_cmd = cmd.split(',')
# for each part
final_cmd = []
for i in tmp_cmd:
if len(i) == 0:
# this means that there was the ',,' pattern
raise ValueError("Command cannot contain ',,'.")
elif str_is_int(i):
# if it is a positive or negative integer convert it to int
final_cmd.append(int(i))
else:
# else raise an error
raise ValueError("Only integers allowed.")
return final_cmd
def parse_int_range_command(cmd):
r"""
Function that parses a command for list/range. It is able to recognize any
pattern and detect pattern errors. Some characteristic examples are "10",
"[10, 20]", "10, 20", "range(10)", "range(10, 20)", "range(10, 20, 2)",
"1, 5, -3", etc. The function returns a `list` with integers, after
interpreting the given command. It also ignores any redundant whitespaces
that may exist in the command.
Parameters
----------
cmd : `str`
The command string.
Returns
-------
cmd_list : `list`
The list of integers.
Raises
------
ValueError
Command cannot contain floats.
ValueError
Wrong range command.
ValueError
Wrong command.
"""
# remove all redundant spaces from cmd
cmd = cmd.replace(" ", "")
# remove all brackets from cmd
cmd = cmd.replace("[", "")
cmd = cmd.replace("]", "")
# if cmd has '.' then it contains at least a float
if cmd.count(".") > 0:
raise ValueError("Command cannot contain floats.")
# cmd has the form of "range(1, 10, 2)" or "range(10)"
if cmd.startswith("range("):
n_comma = cmd.count(",")
if cmd.endswith(")") and (n_comma == 0 or n_comma == 1 or n_comma == 2):
return eval("list({})".format(cmd))
else:
raise ValueError("Wrong range command.")
# empty command
if cmd == "":
return []
# get number of ','
n_comma = cmd.count(",")
if n_comma > 0:
# parse cmd given that it contains only ','
return parse_int_range_command_with_comma(cmd)
elif n_comma == 0 and str_is_int(cmd):
# cmd has the form of "10"
return [int(cmd)]
else:
raise ValueError("Wrong command.")
def parse_float_range_command_with_comma(cmd):
r"""
Function that parses a range/list command for which contains at least one
comma (``,``). The function returns a `list` with the float values that
are included in the command. It also ignores any redundant whitespaces
that may exist in the command. For example ::
        parse_float_range_command_with_comma("1., 2.,-3.")
    returns ::
        [1.0, 2.0, -3.0]
    Parameters
    ----------
cmd : `str`
The command string.
Returns
-------
cmd_list : `list`
The list of floats.
Raises
------
ValueError
Command cannot start or end with ','.
ValueError
Command cannot contain ',,'.
ValueError
        Only floats allowed.
"""
if cmd.startswith(',') or cmd.endswith(','):
# if cmd starts or ends with ',', raise an error
raise ValueError("Command cannot start or end with ','.")
else:
# get the parts in between commas
tmp_cmd = cmd.split(',')
# for each part
final_cmd = []
for i in tmp_cmd:
if len(i) == 0:
# this means that there was the ',,' pattern
raise ValueError("Command cannot contain ',,'.")
elif str_is_int(i) or str_is_float(i):
# if it is a positive or negative integer convert it to float
final_cmd.append(float(i))
else:
# else raise an error
raise ValueError("Only floats allowed.")
return final_cmd
def parse_float_range_command(cmd):
r"""
Function that parses a command for list/range. It is able to recognize any
pattern and detect pattern errors. Some characteristic examples are "10.5",
"[10., 20.]", "10., 20.", "range(10.)", "range(10., 20.)",
"range(10., 20., 2.)", "1., 5., -3.2", etc. The function returns a `list`
    with floats, after interpreting the given command. It also ignores any
redundant whitespaces that may exist in the command.
Parameters
----------
cmd : `str`
The command string.
Returns
-------
cmd_list : `list`
The list of floats.
Raises
------
ValueError
Wrong range command.
ValueError
Wrong command.
"""
# remove all redundant spaces from cmd
cmd = cmd.replace(" ", "")
# remove all brackets from cmd
cmd = cmd.replace("[", "")
cmd = cmd.replace("]", "")
# cmd has the form of "range(1, 10, 2)" or "range(10)"
if cmd.startswith("range("):
if cmd.endswith(")"):
nums = cmd[6:-1].split(',')
if len(nums) == 1:
arg1 = 0.
arg2 = float(nums[0])
arg3 = 1.
elif len(nums) == 2:
arg1 = float(nums[0])
arg2 = float(nums[1])
arg3 = 1.
elif len(nums) == 3:
arg1 = float(nums[0])
arg2 = float(nums[1])
arg3 = float(nums[2])
else:
raise ValueError("Wrong range command.")
return list(np.arange(arg1, arg2, arg3))
else:
raise ValueError("Wrong range command.")
# empty command
if cmd == "":
return []
# get number of ','
n_comma = cmd.count(",")
if n_comma > 0:
# parse cmd given that it contains only ','
return parse_float_range_command_with_comma(cmd)
elif n_comma == 0 and (str_is_int(cmd) or str_is_float(cmd)):
# cmd has the form of "10"
return [float(cmd)]
else:
raise ValueError("Wrong command.")
def parse_slicing_command_with_comma(cmd, length):
r"""
Function that parses a command for slicing which contains at least one comma
(``,``). The function returns a `list` with the integer values that are
included in the command. It also ignores any redundant whitespaces that may
exist in the command. For example ::
        parse_slicing_command_with_comma("1, 2,-3 ", 10)
    returns ::
        [1, 2, -3]
    Parameters
    ----------
cmd : `str`
The command string.
length : `int`
The length of the variable that will get sliced.
Returns
-------
cmd_list : `list`
The list that can be used for slicing after interpreting and evaluating
the provided command.
Raises
------
ValueError
Command cannot start or end with ','.
ValueError
Command cannot contain a pattern of the form ',,'.
ValueError
Command cannot contain numbers greater than {length}.
ValueError
Command must contain positive or negative integers.
"""
if cmd.startswith(',') or cmd.endswith(','):
# if cmd starts or ends with ',', raise an error
raise ValueError("Command cannot start or end with ','.")
else:
# get the parts in between commas
tmp_cmd = cmd.split(',')
# for each part
final_cmd = []
for i in tmp_cmd:
if len(i) == 0:
# this means that there was the ',,' pattern
raise ValueError("Command cannot contain a pattern of the "
"form ',,'.")
elif str_is_int(i):
# if it is a positive or negative integer convert it to int
n = int(i)
if n >= length:
raise ValueError("Command cannot contain numbers greater "
"than {}.".format(length))
else:
final_cmd.append(n)
else:
# else raise an error
raise ValueError("Command must contain positive or negative "
"integers.")
return final_cmd
def parse_slicing_command_with_one_colon(cmd, length):
r"""
Function that parses a command for slicing which contains exactly one colon
(``:``). The function returns a `list` with the integer indices, after
interpreting the slicing command. It also ignores any redundant whitespaces
that may exist in the command. For example ::
        parse_slicing_command_with_one_colon(':3', 10)
    returns ::
        [0, 1, 2]
Parameters
----------
cmd : `str`
The command string.
length : `int`
The length of the variable that will get sliced.
Returns
-------
cmd_list : `list`
The list that can be used for slicing after interpreting and evaluating
the provided command.
Raises
------
ValueError
Command cannot contain numbers greater than {length}.
ValueError
Command must contain positive or negative integers.
"""
# this is necessary in order to return ranges with negative slices
tmp_list = list(range(length))
if cmd.startswith(':'):
# cmd has the form ":3" or ":"
if len(cmd) > 1:
# cmd has the form ":3"
i = cmd[1:]
if str_is_int(i):
n = int(i)
if n > length:
raise ValueError("Command cannot contain numbers greater "
"than {}.".format(length))
else:
return tmp_list[:n]
else:
raise ValueError("Command must contain integers.")
else:
# cmd is ":"
return tmp_list
elif cmd.endswith(':'):
# cmd has the form "3:" or ":"
if len(cmd) > 1:
# cmd has the form "3:"
i = cmd[:-1]
if str_is_int(i):
n = int(i)
if n >= length:
raise ValueError("Command cannot contain numbers greater "
"than {}.".format(length))
else:
return tmp_list[n:]
else:
raise ValueError("Command must contain integers.")
else:
# cmd is ":"
return tmp_list
else:
# cmd has the form "3:10"
# get the parts before and after colon
tmp_cmd = cmd.split(':')
start = tmp_cmd[0]
end = tmp_cmd[1]
if str_is_int(start) and str_is_int(end):
start = int(start)
end = int(end)
if start >= length or end > length:
raise ValueError("Command cannot contain numbers greater "
"than {}.".format(length))
else:
return tmp_list[start:end]
else:
raise ValueError("Command must contain integers.")
def parse_slicing_command_with_two_colon(cmd, length):
r"""
Function that parses a command for slicing which contains exactly two colons
(``:``). The function returns a `list` with the integer indices, after
interpreting the slicing command. It also ignores any redundant whitespaces
that may exist in the command. For example ::
        parse_slicing_command_with_two_colon('::3', 10)
    returns ::
        [0, 3, 6, 9]
Parameters
----------
cmd : `str`
The command string.
length : `int`
The length of the variable that will get sliced.
Returns
-------
cmd_list : `list`
The list that can be used for slicing after interpreting and evaluating
the provided command.
Raises
------
ValueError
Command cannot contain numbers greater than {length}.
ValueError
Command must contain positive or negative integers.
"""
# this is necessary in order to return ranges with negative slices
tmp_list = list(range(length))
if cmd.startswith('::'):
# cmd has the form "::3" or "::"
if len(cmd) > 2:
# cmd has the form "::3"
i = cmd[2:]
if str_is_int(i):
n = int(i)
return tmp_list[::n]
else:
raise ValueError("Command must contain integers.")
else:
# cmd is "::"
return tmp_list
elif cmd.endswith('::'):
# cmd has the form "3::" or "::"
if len(cmd) > 2:
# cmd has the form "3::"
i = cmd[:-2]
if str_is_int(i):
n = int(i)
if n >= length:
raise ValueError("Command cannot contain numbers greater "
"than {}.".format(length))
else:
return tmp_list[n::]
else:
raise ValueError("Command must contain integers.")
else:
# cmd is "::"
return tmp_list
else:
# cmd has the form "1:8:2"
# get the parts in between colons
tmp_cmd = cmd.split(':')
start = tmp_cmd[0]
end = tmp_cmd[1]
step = tmp_cmd[2]
if str_is_int(start) and str_is_int(end) and str_is_int(step):
start = int(start)
end = int(end)
step = int(step)
if start >= length or end > length:
raise ValueError("Command cannot contain numbers greater "
"than {}.".format(length))
else:
return tmp_list[start:end:step]
else:
raise ValueError("Command must contain integers.")
def parse_slicing_command(cmd, length):
r"""
Function that parses a command for slicing. It is able to recognize any
slicing pattern of Python and detect pattern errors. Some characteristic
examples are ":3", ":-2", "3:", "::3", "3::", "1:8", "1:8:2", "1, 5, -3",
"range(10)", "range("1, 10, 2)" etc. The function returns a `list` with the
integer indices, after interpreting the slicing command. It also ignores any
redundant whitespaces that may exist in the command.
Parameters
----------
cmd : `str`
The command string.
length : `int`
The length of the variable that will get sliced.
Returns
-------
cmd_list : `list`
The list that can be used for slicing after interpreting and evaluating
the provided command.
Raises
------
ValueError
Command cannot contain numbers greater than {length}.
ValueError
Command must contain positive or negative integers.
"""
# remove all redundant spaces from cmd
cmd = cmd.replace(" ", "")
# remove all brackets from cmd
cmd = cmd.replace("[", "")
cmd = cmd.replace("]", "")
# cmd has the form of "range(1, 10, 2)" or "range(10)"
if cmd.startswith("range("):
if cmd.endswith(")"):
cmd = cmd[6:-1]
if cmd.count(",") > 0:
cmd = cmd.replace(",", ":")
else:
cmd = "0:" + cmd
else:
raise ValueError("Wrong command.")
# empty command
if cmd == "":
return []
# get number of ':' and number of ','
n_colon = cmd.count(":")
n_comma = cmd.count(",")
if n_comma > 0 and n_colon == 0:
# parse cmd given that it contains only ','
return parse_slicing_command_with_comma(cmd, length)
elif n_comma == 0 and n_colon > 0:
# parse cmd given that it contains only ':'
if n_colon == 1:
return parse_slicing_command_with_one_colon(cmd, length)
elif n_colon == 2:
return parse_slicing_command_with_two_colon(cmd, length)
else:
raise ValueError("More than 2 ':'.")
elif n_comma == 0 and n_colon == 0:
# cmd has the form of "10"
if str_is_int(cmd):
n = int(cmd)
if n >= length:
raise ValueError("Cannot contain numbers greater "
"than {}".format(length))
else:
return [n]
else:
raise ValueError("Wrong command.")
else:
raise ValueError("Wrong command.")
def list_has_constant_step(l):
r"""
Function that checks if a list of integers has a constant step between them
and returns the step.
Parameters
----------
l : `list`
The list to check.
Returns
-------
has_constant_step : `bool`
``True`` if the `list` elements have a constant step between them.
step : `int`
The step value. ``None`` if `has_constant_step` is ``False``.
"""
if len(l) <= 1:
return False, None
step = l[1] - l[0]
s = step
i = 2
while s == step and i < len(l):
s = l[i] - l[i - 1]
i += 1
if i == len(l) and s == step:
return True, step
else:
return False, None
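# --- Illustrative sketch (not part of the original module) ------------------
# Quick illustration of list_has_constant_step (hypothetical helper, for
# illustration only).
def _example_list_has_constant_step():
    assert list_has_constant_step([0, 2, 4, 6]) == (True, 2)
    assert list_has_constant_step([0, 2, 5]) == (False, None)
    assert list_has_constant_step([7]) == (False, None)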
def sample_colours_from_colourmap(n_colours, colour_map):
    r"""
    Function that samples ``n_colours`` equally spaced colours from the
    provided matplotlib colour map and returns them as hex colour strings.
    """
    import matplotlib.pyplot as plt
cm = plt.get_cmap(colour_map)
colours = []
for i in range(n_colours):
c = cm(1.*i/n_colours)[:3]
colours.append(decode_colour([int(i * 255) for i in c]))
return colours
def extract_group_labels_from_landmarks(landmark_manager):
    r"""
    Function that extracts the group keys and their corresponding label keys
    from a landmark manager. Both are ``None`` if no landmarks are attached.
    """
groups_keys = None
labels_keys = None
if landmark_manager.has_landmarks:
groups_keys = landmark_manager.group_labels
labels_keys = [landmark_manager[g].labels for g in groups_keys]
return groups_keys, labels_keys
def extract_groups_labels_from_image(image):
r"""
Function that extracts the groups and labels from an image's landmarks.
Parameters
----------
image : :map:`Image` or subclass
The input image object.
Returns
-------
group_keys : `list` of `str`
The list of landmark groups found.
labels_keys : `list` of `str`
The list of lists of each landmark group's labels.
"""
groups_keys, labels_keys = extract_group_labels_from_landmarks(image.landmarks)
return groups_keys, labels_keys
def render_image(image, renderer, render_landmarks, image_is_masked,
masked_enabled, channels, glyph_enabled, glyph_block_size,
glyph_use_negative, sum_enabled, group, with_labels,
render_lines, line_style, line_width, line_colour,
render_markers, marker_style, marker_size,
marker_edge_width, marker_edge_colour, marker_face_colour,
render_numbering, numbers_font_name, numbers_font_size,
numbers_font_style, numbers_font_weight,
numbers_font_colour, numbers_horizontal_align,
numbers_vertical_align, legend_n_columns,
legend_border_axes_pad, legend_rounded_corners,
legend_title, legend_horizontal_spacing, legend_shadow,
legend_location, legend_font_name, legend_bbox_to_anchor,
legend_border, legend_marker_scale,
legend_vertical_spacing, legend_font_weight,
legend_font_size, render_legend, legend_font_style,
legend_border_padding, figure_size, render_axes,
axes_font_name, axes_font_size, axes_font_style,
axes_font_weight, axes_x_limits, axes_y_limits, axes_x_ticks,
axes_y_ticks, interpolation, alpha, cmap_name):
# This makes the code shorter for dealing with masked images vs non-masked
# images
mask_arguments = ({'masked': masked_enabled} if image_is_masked else {})
# plot
if render_landmarks and group is not None:
# show image with landmarks
if glyph_enabled:
# image, landmarks, masked, glyph
renderer = glyph(image, vectors_block_size=glyph_block_size,
use_negative=glyph_use_negative,
channels=channels).view_landmarks(
group=group, with_labels=with_labels, without_labels=None,
figure_id=renderer.figure_id, new_figure=False,
render_lines=render_lines, line_colour=line_colour,
line_style=line_style, line_width=line_width,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend, legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size,
interpolation=interpolation, alpha=alpha, cmap_name=cmap_name,
**mask_arguments)
elif sum_enabled:
# image, landmarks, masked, sum
renderer = sum_channels(image, channels=channels).view_landmarks(
group=group, with_labels=with_labels, without_labels=None,
figure_id=renderer.figure_id, new_figure=False,
render_lines=render_lines, line_colour=line_colour,
line_style=line_style, line_width=line_width,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend, legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size,
interpolation=interpolation, alpha=alpha, cmap_name=cmap_name,
**mask_arguments)
else:
renderer = image.view_landmarks(
channels=channels, group=group, with_labels=with_labels,
without_labels=None, figure_id=renderer.figure_id,
new_figure=False, render_lines=render_lines,
line_colour=line_colour, line_style=line_style,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend, legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size,
interpolation=interpolation, alpha=alpha, cmap_name=cmap_name,
**mask_arguments)
else:
# either there are not any landmark groups selected or they won't
# be displayed
if glyph_enabled:
# image, not landmarks, masked, glyph
renderer = glyph(image, vectors_block_size=glyph_block_size,
use_negative=glyph_use_negative,
channels=channels).view(
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, figure_size=figure_size,
interpolation=interpolation, alpha=alpha, cmap_name=cmap_name,
**mask_arguments)
elif sum_enabled:
# image, not landmarks, masked, sum
renderer = sum_channels(image, channels=channels).view(
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, figure_size=figure_size,
interpolation=interpolation, alpha=alpha, cmap_name=cmap_name,
**mask_arguments)
else:
# image, not landmarks, masked, not glyph/sum
renderer = image.view(
channels=channels, render_axes=render_axes,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, figure_size=figure_size,
interpolation=interpolation, alpha=alpha, cmap_name=cmap_name,
**mask_arguments)
# show plot
plt.show()
return renderer
def render_patches(patches, patch_centers, patches_indices, offset_index,
renderer, background, render_patches, channels,
glyph_enabled, glyph_block_size, glyph_use_negative,
sum_enabled, interpolation, cmap_name, alpha,
render_patches_bboxes, bboxes_line_colour,
bboxes_line_style, bboxes_line_width, render_centers,
render_lines, line_colour, line_style, line_width,
render_markers, marker_style, marker_size,
marker_face_colour, marker_edge_colour,
marker_edge_width, render_numbering,
numbers_horizontal_align, numbers_vertical_align,
numbers_font_name, numbers_font_size, numbers_font_style,
numbers_font_weight, numbers_font_colour, render_axes,
axes_font_name, axes_font_size, axes_font_style,
axes_font_weight, axes_x_limits, axes_y_limits,
axes_x_ticks, axes_y_ticks, figure_size):
from menpo.transform import UniformScale
from menpo.visualize import view_patches
if glyph_enabled and render_patches:
# compute glyph size
glyph_patch0 = glyph(patches[0, offset_index, ...],
vectors_block_size=glyph_block_size,
use_negative=glyph_use_negative)
# compute glyph of each patch
glyph_patches = np.zeros((patches.shape[0], 1, 1, glyph_patch0.shape[1],
glyph_patch0.shape[2]))
glyph_patches[0, 0, ...] = glyph_patch0
for i in range(1, patches.shape[0]):
glyph_patches[i, 0, ...] = glyph(
patches[i, offset_index, ...],
vectors_block_size=glyph_block_size,
use_negative=glyph_use_negative)
# correct patch centers
glyph_patch_centers = UniformScale(glyph_block_size, 2).apply(
patch_centers)
# visualize glyph patches
renderer = view_patches(
glyph_patches, glyph_patch_centers, patches_indices=patches_indices,
offset_index=0, figure_id=renderer.figure_id, new_figure=False,
background=background, render_patches=render_patches, channels=0,
interpolation=interpolation, cmap_name=cmap_name, alpha=alpha,
render_patches_bboxes=render_patches_bboxes,
bboxes_line_colour=bboxes_line_colour,
bboxes_line_style=bboxes_line_style,
bboxes_line_width=bboxes_line_width,
render_centers=render_centers, render_lines=render_lines,
line_colour=line_colour, line_style=line_style,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,
figure_size=figure_size)
elif sum_enabled and render_patches:
# compute sum of each patch
sum_patches = np.zeros((patches.shape[0], 1, 1, patches.shape[3],
patches.shape[4]))
for i in patches_indices:
sum_patches[i, 0, ...] = sum_channels(
patches[i, offset_index, ...], channels=channels)
# visualize sum patches
renderer = view_patches(
sum_patches, patch_centers, patches_indices=patches_indices,
offset_index=0, figure_id=renderer.figure_id, new_figure=False,
background=background, render_patches=render_patches, channels=0,
interpolation=interpolation, cmap_name=cmap_name, alpha=alpha,
render_patches_bboxes=render_patches_bboxes,
bboxes_line_colour=bboxes_line_colour,
bboxes_line_style=bboxes_line_style,
bboxes_line_width=bboxes_line_width,
render_centers=render_centers, render_lines=render_lines,
line_colour=line_colour, line_style=line_style,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,
figure_size=figure_size)
else:
renderer = view_patches(
patches, patch_centers, patches_indices=patches_indices,
offset_index=offset_index, figure_id=renderer.figure_id,
new_figure=False, background=background,
render_patches=render_patches, channels=channels,
interpolation=interpolation, cmap_name=cmap_name, alpha=alpha,
render_patches_bboxes=render_patches_bboxes,
bboxes_line_colour=bboxes_line_colour,
bboxes_line_style=bboxes_line_style,
bboxes_line_width=bboxes_line_width,
render_centers=render_centers, render_lines=render_lines,
line_colour=line_colour, line_style=line_style,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,
figure_size=figure_size)
# show plot
plt.show()
return renderer
| bsd-3-clause |
peterpolidoro/elf | tests/volume_to_adc.py | 4 | 5108 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import matplotlib.pyplot as plot
import numpy
from numpy.polynomial.polynomial import polyfit,polyadd,Polynomial
import yaml
INCHES_PER_ML = 0.078
VOLTS_PER_ADC_UNIT = 0.0049
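# Conversion constants: INCHES_PER_ML maps syringe plunger travel (inches) to dispensed
# volume (ml); VOLTS_PER_ADC_UNIT is assumed to correspond to a 10-bit ADC with a 5 V
# reference (5 V / 1024 counts ~ 0.0049 V per count).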
def load_numpy_data(path):
with open(path,'r') as fid:
header = fid.readline().rstrip().split(',')
dt = numpy.dtype({'names':header,'formats':['S25']*len(header)})
numpy_data = numpy.loadtxt(path,dtype=dt,delimiter=",",skiprows=1)
return numpy_data
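# load_numpy_data builds a structured dtype from the CSV header (every field read as a
# string) and loads the remaining rows; callers convert individual columns to float64.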
# -----------------------------------------------------------------------------------------
if __name__ == '__main__':
# Load VA data
data_file = 'hall_effect_data_va.csv'
hall_effect_data_va = load_numpy_data(data_file)
distances_va = numpy.float64(hall_effect_data_va['distance'])
A1_VA = numpy.float64(hall_effect_data_va['A1'])
A9_VA = numpy.float64(hall_effect_data_va['A9'])
A4_VA = numpy.float64(hall_effect_data_va['A4'])
A12_VA = numpy.float64(hall_effect_data_va['A12'])
A2_VA = numpy.float64(hall_effect_data_va['A2'])
A10_VA = numpy.float64(hall_effect_data_va['A10'])
A5_VA = numpy.float64(hall_effect_data_va['A5'])
A13_VA = numpy.float64(hall_effect_data_va['A13'])
# Massage VA data
volumes_va = distances_va/INCHES_PER_ML
A1_VA = numpy.reshape(A1_VA,(-1,1))
A9_VA = numpy.reshape(A9_VA,(-1,1))
A4_VA = numpy.reshape(A4_VA,(-1,1))
A12_VA = numpy.reshape(A12_VA,(-1,1))
A2_VA = numpy.reshape(A2_VA,(-1,1))
A10_VA = numpy.reshape(A10_VA,(-1,1))
A5_VA = numpy.reshape(A5_VA,(-1,1))
A13_VA = numpy.reshape(A13_VA,(-1,1))
data_va = numpy.hstack((A1_VA,A9_VA,A4_VA,A12_VA,A2_VA,A10_VA,A5_VA,A13_VA))
data_va = data_va/VOLTS_PER_ADC_UNIT
# Load OA data
data_file = 'hall_effect_data_oa.csv'
hall_effect_data_oa = load_numpy_data(data_file)
distances_oa = numpy.float64(hall_effect_data_oa['distance'])
A9_OA = numpy.float64(hall_effect_data_oa['A9'])
A10_OA = numpy.float64(hall_effect_data_oa['A10'])
A11_OA = numpy.float64(hall_effect_data_oa['A11'])
A12_OA = numpy.float64(hall_effect_data_oa['A12'])
# Massage OA data
volumes_oa = distances_oa/INCHES_PER_ML
A9_OA = numpy.reshape(A9_OA,(-1,1))
A10_OA = numpy.reshape(A10_OA,(-1,1))
A11_OA = numpy.reshape(A11_OA,(-1,1))
A12_OA = numpy.reshape(A12_OA,(-1,1))
data_oa = numpy.hstack((A9_OA,A10_OA,A11_OA,A12_OA))
data_oa = data_oa/VOLTS_PER_ADC_UNIT
# Create figure
fig = plot.figure()
fig.suptitle('hall effect sensors',fontsize=14,fontweight='bold')
fig.subplots_adjust(top=0.85)
colors = ['b','g','r','c','m','y','k','b']
markers = ['o','o','o','o','o','o','o','^']
# Axis 1
ax1 = fig.add_subplot(121)
for column_index in range(0,data_va.shape[1]):
color = colors[column_index]
marker = markers[column_index]
ax1.plot(volumes_va,data_va[:,column_index],marker=marker,linestyle='--',color=color)
for column_index in range(0,data_oa.shape[1]):
color = colors[column_index]
marker = markers[column_index]
ax1.plot(volumes_oa,data_oa[:,column_index],marker=marker,linestyle='--',color=color)
ax1.set_xlabel('volume (ml)')
ax1.set_ylabel('mean signals (ADC units)')
ax1.grid(True)
# Axis 2
for column_index in range(0,data_va.shape[1]):
data_va[:,column_index] -= data_va[:,column_index].min()
MAX_VA = 120
data_va = data_va[numpy.all(data_va<MAX_VA,axis=1)]
length = data_va.shape[0]
volumes_va = volumes_va[-length:]
# for column_index in range(0,data_oa.shape[1]):
# data_oa[:,column_index] -= data_oa[:,column_index].max()
ax2 = fig.add_subplot(122)
for column_index in range(0,data_va.shape[1]):
color = colors[column_index]
marker = markers[column_index]
ax2.plot(volumes_va,data_va[:,column_index],marker=marker,linestyle='--',color=color)
# for column_index in range(0,data_oa.shape[1]):
# color = colors[column_index]
# marker = markers[column_index]
# ax2.plot(data_oa[:,column_index],volumes_oa,marker=marker,linestyle='--',color=color)
ax2.set_xlabel('volume (ml)')
ax2.set_ylabel('offset mean signals (ADC units)')
ax2.grid(True)
order = 3
sum_va = None
for column_index in range(0,data_va.shape[1]):
coefficients_va = polyfit(volumes_va,data_va[:,column_index],order)
if sum_va is None:
sum_va = coefficients_va
else:
sum_va = polyadd(sum_va,coefficients_va)
average_va = sum_va/data_va.shape[1]
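    # note: polyfit from numpy.polynomial.polynomial returns coefficients in ascending
    # order (c0, c1, c2, c3), matching the s = c0 + c1*v + c2*v^2 + c3*v^3 label below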
round_digits = 8
average_va = [round(i,round_digits) for i in average_va]
with open('volume_to_adc_va.yaml', 'w') as f:
yaml.dump(average_va, f, default_flow_style=False)
poly_va = Polynomial(average_va)
ys_va = poly_va(volumes_va)
ax2.plot(volumes_va,ys_va,'r',linewidth=3)
ax2.text(0.5,110,r'$s = c_0 + c_1v + c_2v^2 + c_3v^3$',fontsize=20)
ax2.text(0.5,100,str(average_va),fontsize=18,color='r')
plot.show()
| bsd-3-clause |
jmfranck/pyspecdata | pyspecdata/latexscripts.py | 1 | 15705 | r'''Provides the ``pdflatex_notebook_wrapper`` shell/dos command, which you run
instead of your normal Latex command to build a lab notebook.
The results of python environments are **cached** and **only re-run if the code changes**,
even if the python environments are moved around.
This makes the compilation of a Latex lab notebook extremely efficient.
'''
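# A typical invocation (hypothetical notebook name) simply replaces the usual call:
#   pdflatex_notebook_wrapper -synctex=1 mynotebook.tex
# which runs pdflatex/xelatex and then update_notebook_pythonscripts so that the cached
# outputs of any changed python environments are refreshed.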
from .datadir import getDATADIR, _my_config
#from .datadir import get_notebook_dir
from distutils.spawn import find_executable
import os.path
import hashlib
import numpy
import sys
import time
import platform
from shutil import copy2 as sh_copy2
from subprocess import Popen,PIPE,check_output
haswatchdog = False
if haswatchdog:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
if haswatchdog:
class handle_dir_change (FileSystemEventHandler):
def on_modified(self,event):
#print "called 'on_modified'"
if event is None:# if I called it manually
runornot = True
print("Scons signaled manually")
else:
#print "in event:",dir(event)
#print event.event_type
#print event.is_directory
#print event.key
#print event.src_path
if event.src_path in ['.' + os.path.sep + j for j in self.dependencies]:
print("Scons signaled on",event.src_path,", which is a dependency")
runornot = True
else:
print("Modified",event.src_path,"which is not a dependency")
runornot = False
if runornot:
outstring = check_output(['scons','--tree=all'] + sys.argv[1:],shell = True)
#print "outstring:\n",outstring
self.dependencies = []
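                # parse the dependency tree printed by "scons --tree=all": each
                # dependency is listed on a line such as "  +-path/to/file", so the
                # loop below strips the "+-" prefix (and any extra dashes) to recover
                # the bare path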
for line in outstring.split(os.linesep):
# here, I grab the tree, and then I should be able to use watchdog to implement latexmk-like functionality
x = line.find('+-')
if x > -1:
x += 1
while line[x] == '-':
x += 1
self.dependencies.append(line[x:])
self.dependencies.pop(0) # this is the file itself
return self.dependencies
def det_new_pdf_name(thisargv):
    'based on an original tex or pdf name, determine the original basename (i.e., no extension), as well as one with the final underscore-separated word removed'
tex_basename = list(filter(lambda x: x[0] != '-',
thisargv))[-1]
tex_basename = os.path.basename(tex_basename)
if tex_basename[-4:] == '.tex':
tex_basename = tex_basename[:-4]
elif tex_basename[-4:] == '.pdf':
tex_basename = tex_basename[:-4]
orig_tex_basename = tex_basename
tex_basename = tex_basename.split('_')
if (len(tex_basename) > 1) and (tex_basename[-1] in ['basic','fancy','georgia']):
new_pdf_basename = '_'.join(tex_basename[:-1])
else:
new_pdf_basename = '_'.join(tex_basename)
return orig_tex_basename,new_pdf_basename
def genconfig():
    '''creates a template configuration file in the user's home directory'''
    _my_config._config_parser = None # this suppresses the output
if platform.platform().startswith('Windows'):
hide_start = '_' # the default hidden/config starter for vim, mingw applications, etc
else:
hide_start = '.'
filename = os.path.join(os.path.expanduser('~'),hide_start+'pyspecdata')
with open(filename,'w',encoding='utf-8') as fp:
fp.write('[General]\n')
fp.write('# replace the following with your default data location (this is just a suggestion)\n')
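        # guess a likely data directory: any folder directly under the user's home whose
        # name contains "data" (case-insensitive) but does not look like application data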
possible_data = [x for x in next(os.walk(os.path.expanduser('~')))[1] if 'data' in x.lower() and 'app' not in x.lower()]
fp.write('data_directory = ')
if len(possible_data) > 0:
fp.write(os.path.join(os.path.expanduser('~'),possible_data[0])+'\n')
else:
fp.write(os.path.join(os.path.expanduser('~'),'???')+'\n')
fp.write('\n')
fp.write('[ExpTypes]\n')
        fp.write('# in this section, you can place specific subdirectories (for faster access, or if they live elsewhere)\n')
print("now edit "+filename)
def wraplatex():
    '''runs the python scripts after running latex; it also creates a copy of the resulting PDF whose name drops the final underscore-separated word.
This prevents the viewer from hanging while it's waiting for a refresh.
This can be used in combination with wrapviewer() and latexmk by using a ``~/.latexmkrc`` file that looks like this:
If you pass the ``--xelatex`` argument, xelatex is used instead of pdflatex
(note that if you're using latexmk, you need to add this in the latexmkrc file).
.. code-block:: perl
$pdflatex=q/pdflatex_notebook_wrapper %O -synctex=1 %S/;# calls this function
$pdf_previewer=q/pdflatex_notebook_view_wrapper/;# calls the wrapviewer function
'''
proc_args = list(sys.argv)
if '--xelatex' in proc_args:
proc_args.pop(proc_args.index('--xelatex'))
use_xelatex = True
else:
use_xelatex = False
print("about to update the python script outputs....")
orig_tex_basename,new_pdf_basename = det_new_pdf_name(proc_args)
with open(orig_tex_basename+'.tex','r',encoding='utf-8') as fp:
thisline = fp.readline()
while thisline.startswith('%!'):# in case we want to allow multiple directives
if 'xelatex' in thisline:
use_xelatex = True
elif 'pdflatex' in thisline:
use_xelatex = False
thisline = fp.readline()
if use_xelatex:
shellcmd = ' '.join(['xelatex']+proc_args[1:])
else:
shellcmd = ' '.join(['pdflatex']+proc_args[1:])
print("executing:",shellcmd)
os.system(shellcmd)
print("executing:",'update_notebook_pythonscripts')
os.system('update_notebook_pythonscripts')
if orig_tex_basename != new_pdf_basename:
print("preparing to:",'cp '+orig_tex_basename+'.pdf '+new_pdf_basename+'.pdf')
os.system('cp '+orig_tex_basename+'.pdf '+new_pdf_basename+'.pdf')
os.system('cp '+orig_tex_basename+'.synctex.gz '
+new_pdf_basename+'.synctex.gz')
return
def wrapviewer():
'see :func:`wraplatex <pyspecdata.latexscripts.wraplatex>`'
pdf_basename = list(filter(lambda x: x[0] != '-',
sys.argv))[-1]
orig_tex_basename,new_pdf_basename = det_new_pdf_name(sys.argv)
if os.name == 'posix':
# {{{ this plays the role of the function that I used to call "new_evince" with argument "b"
which_command = 'b'
full_pdf_name = new_pdf_basename+'.pdf'
full_tex_name = orig_tex_basename+'.tex'
        if which_command == 'f':#forward
# no longer used, but make this functional, in case I want it later
#3/29/14 -- replaced '+sys.argv[2]+' w/ default
cmdstring = 'evince_vim_dbus.py EVINCE '+full_pdf_name+' 1 '+full_tex_name
print(cmdstring)
os.system(cmdstring)
        elif which_command == 'i':#inverse
cmdstring = 'evince_vim_dbus.py GVIM default '+full_pdf_name+' '+full_tex_name
print(cmdstring)
os.system(cmdstring)
        elif which_command == 'b':#both
cmdstring = '~/silentfork.sh evince_vim_dbus.py EVINCE '+full_pdf_name+' 1 '+full_tex_name
print(cmdstring)
os.system(cmdstring)
time.sleep(0.75)
cmdstring = '~/silentfork.sh evince_vim_dbus.py GVIM default '+full_pdf_name+' '+full_tex_name
print(cmdstring)
os.system(cmdstring)
# }}}
else:
os.system('start sumatrapdf -reuse-instance '+new_pdf_basename+'.pdf')
if new_pdf_basename == 'lists':
        os.system('cp lists.pdf "'+os.path.join(os.path.expanduser('~'),'Dropbox','lists.pdf')+'"')
return
def script_filename(scriptnum_as_str):
return get_scripts_dir()+scriptnum_as_str+'.py'
def cached_filename(hashstring,returndir = False):
r'''this sets the format for where the cached file is stored
    we use the first two hex characters as nested directory names (so there are at most 16 entries at each level)'''
return get_scripts_dir() + 'cache' + os.path.sep + hashstring[0] + os.path.sep + hashstring[1] + os.path.sep + hashstring[2:] + '.tex'
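# e.g. a (hypothetical) hash starting with "3fa2" would be cached as
# scripts/cache/3/f/a2....tex, so the two leading hex characters give at most 16
# subdirectories per level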
def grab_script_string(scriptnum_as_str):
fp_script = open(script_filename(scriptnum_as_str),encoding='utf-8')
#print "opening",script_filename(scriptnum_as_str)
script_string = ''
reading = False
for scriptline in fp_script.readlines():
if reading:
script_string += scriptline
if scriptline.strip() == '### start checksum ###':
reading = True
fp_script.close()
return script_string
def check_image_path():
image_path = os.path.sep.join([os.getcwd(),'auto_figures'])
if not os.path.exists(image_path):
os.mkdir(image_path)
return
def get_scripts_dir():
script_path = os.path.sep.join([os.getcwd(),'scripts',''])
if not os.path.exists(script_path):
os.mkdir(os.path.sep.join([os.getcwd(),'scripts']))
return script_path
def sha_string(script):
'convert the sha hash to a string'
s = hashlib.sha256()
s.update(script.encode('utf-8'))
    hasharray = numpy.frombuffer(s.digest(),'>u8') # frombuffer: fromstring is deprecated for binary data
del s
return ''.join(['%016x'%x for x in list(hasharray)])
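# sha_string therefore yields 64 lowercase hex characters: the 32-byte SHA-256 digest
# viewed as four big-endian uint64 values, each formatted as 16 hex digits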
def cache_output_if_needed(scriptnum_as_str,hashstring,showcode = False,show_error = True):
r'if needed, run the python script numbered by scriptnum_as_str that hashes to hashstring, and output the result to the cache ONLY'
output_fname = cached_filename(hashstring)
script_fname = script_filename(scriptnum_as_str)
# {{{ interpret the "NOerr" directive
with open(script_fname,'r',encoding='utf-8') as fp:
firstline = fp.readline()
if firstline.startswith('### NOerr'): show_error = False
# }}}
if os.path.exists(output_fname):
print(output_fname,"already exists, so I will just use it")
else:
print("no cached file")
if not os.path.exists(os.path.dirname(output_fname)):
os.makedirs(os.path.dirname(output_fname))
fp_out = open(output_fname,'w',encoding='utf-8')
#fp_out.write(r'{\color{red}script: %d}'%script_number+'\n')
if showcode:
fp_out.write(r'\begin{lstlisting}'+'\n')
            # embed the full source of the script in the listing
            with open(script_fname,'r',encoding='utf-8') as fp_script:
                fp_out.write(fp_script.read())
fp_out.write(r'\end{lstlisting}'+'\n')
temp = os.environ
print("about to run python")
python_name = 'python'
if os.name == 'posix':
temp.update({'PYTHON_DATA_DIR':getDATADIR()})
# on mac, we frequently want to use python2
if find_executable('python2'):
python_name = 'python2'
proc = Popen([python_name,'-W','ignore',script_fname],
stdout = PIPE,
stdin = PIPE,
stderr = PIPE,
env = temp)
else: #windows should give os.name == 'nt'
temp.update({'MPLCONFIGDIR':os.getcwd()+'/.matplotlib',
'PYTHON_DATA_DIR':getDATADIR()})
proc = Popen([python_name,'-W','ignore',script_fname],
stdout = PIPE,
stdin = PIPE,
stderr = PIPE,
env = temp)
stdoutdata,stderrdata = [j.decode('utf-8') for j in proc.communicate()]
print("ran python")
if os.name != 'posix':
stdoutdata = stdoutdata.replace('\r','')
if stderrdata is not None:
stderrdata = stderrdata.replace('\r','')
#fp_out.write('File has not been run, output:\n\n')
fp_out.write("%%% Generated automatically by python for script {:s} hashstring {:s}".format(scriptnum_as_str,hashstring))
fp_out.write("\n")
fp_out.write(stdoutdata)
#fp_out.write('\n\n$\\Rightarrow$ end of output\n\n')
if show_error:
if stderrdata is not None and len(stderrdata) > 0:
fp_out.write("\\quad\\\\ {\\small {\\color{red} {\\tt ERRORS---------------------} "+r'\makeatletter\fn{scripts/\[email protected]}\makeatother'+"}}\\\\\n")
fp_out.write("\n\nThe current directory is \\verb|%s|\n\n"%os.getcwd())
fp_out.write("\n\nThe data directory was set to: \\verb|"+getDATADIR()+"|\n\n")
#fp_out.write("\n\nThe notebook directory was set to: \\verb|"+get_notebook_dir()+"|\n\n")
fp_out.write("\\begin{tiny}\n")
fp_out.write("\\begin{verbatim}\n")
#fp_out.write('...\n\t'.join(textwrap.wrap(stderrdata,80)))
fp_out.write(stderrdata)
fp_out.write("\\end{verbatim}\n")
fp_out.write("\\end{tiny}\n")
fp_out.write("{\\small {\\color{red} {\\tt ---------------------------}}}\\\\\n")
fp_out.close()
return
def flush_script(number):
tex_name = os.path.normpath(os.path.join(os.getcwd(),'scripts',number+'.tex'))
print("removing:",tex_name)
if os.path.exists(tex_name):
os.remove(tex_name)
file_name = cached_filename(sha_string(grab_script_string(number)))
print("removing:",file_name)
if os.path.exists(file_name):
os.remove(file_name)
print("does it still exist?",os.path.exists(file_name))
else:
print("there is no cache for script",number)
return
if haswatchdog:
def repeat_scons():
r'This is just a function to run scons over and over again (e.g. so it can replace latexmk)'
for j in os.listdir('.'):
if j.endswith('.sconslog'):
os.remove(j)# by removing these, we force the PDF reader to open
observer = Observer()
myhandler = handle_dir_change()
dependencies = myhandler.on_modified(None) # run once manually
for j in dependencies:
fname = '.'+ os.path.sep + j
#print "adding",os.path.dirname(fname),"for",fname
observer.schedule(myhandler,path = os.path.dirname(fname),recursive = False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
return
def main():
    r'''This looks for `scripts/scriptsUsed.csv` inside the notebook directory and checks whether each script listed there needs to be re-run.
    If a command line argument of "flush" is passed, it flushes that script number from the cache'''
check_image_path()
if len(sys.argv) > 2:
if sys.argv[1] == 'flush':
if len(sys.argv) == 3:
flush_script(sys.argv[2])
exit()
elif len(sys.argv) == 4:
for j in range(int(sys.argv[2]),int(sys.argv[3])+1):
flush_script(str(j))
exit()
else:
raise RuntimeError("What did you pass me???")
fp = open(get_scripts_dir() + 'scriptsUsed.csv')
for line in fp.readlines():
scriptnum_as_str = line.strip()
outname = scriptnum_as_str + '.tex'
hashstring = sha_string(grab_script_string(scriptnum_as_str))
print("reading script",script_filename(scriptnum_as_str),"which has hash",hashstring)
cache_output_if_needed(scriptnum_as_str,hashstring)
# always pull the output from the cache
sh_copy2(cached_filename(hashstring),get_scripts_dir()+outname)
fp.close()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/tseries/offsets/test_yqm_offsets.py | 6 | 50949 | """
Tests for Year, Quarter, and Month-based DateOffset subclasses
"""
from datetime import datetime
import pytest
import pandas as pd
from pandas import Timestamp
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tseries.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
# --------------------------------------------------------------------
# Misc
def test_quarterly_dont_normalize():
date = datetime(2012, 3, 31, 5, 30)
offsets = (QuarterBegin, QuarterEnd, BQuarterEnd, BQuarterBegin)
for klass in offsets:
result = date + klass()
assert result.time() == date.time()
@pytest.mark.parametrize("n", [-2, 1])
@pytest.mark.parametrize(
"cls",
[
MonthBegin,
MonthEnd,
BMonthBegin,
BMonthEnd,
QuarterBegin,
QuarterEnd,
BQuarterBegin,
BQuarterEnd,
YearBegin,
YearEnd,
BYearBegin,
BYearEnd,
],
)
def test_apply_index(cls, n):
offset = cls(n=n)
rng = pd.date_range(start="1/1/2000", periods=100000, freq="T")
ser = pd.Series(rng)
res = rng + offset
assert res.freq is None # not retained
assert res[0] == rng[0] + offset
assert res[-1] == rng[-1] + offset
res2 = ser + offset
# apply_index is only for indexes, not series, so no res2_v2
assert res2.iloc[0] == ser.iloc[0] + offset
assert res2.iloc[-1] == ser.iloc[-1] + offset
@pytest.mark.parametrize(
"offset", [QuarterBegin(), QuarterEnd(), BQuarterBegin(), BQuarterEnd()]
)
def test_on_offset(offset):
dates = [
datetime(2016, m, d)
for m in [10, 11, 12]
for d in [1, 2, 3, 28, 29, 30, 31]
if not (m == 11 and d == 31)
]
for date in dates:
res = offset.is_on_offset(date)
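        # brute-force check: a date is on-offset exactly when adding and then
        # subtracting the offset returns the same date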
slow_version = date == (date + offset) - offset
assert res == slow_version
# --------------------------------------------------------------------
# Months
class TestMonthBegin(Base):
_offset = MonthBegin
offset_cases = []
# NOTE: I'm not entirely happy with the logic here for Begin -ss
# see thread 'offset conventions' on the ML
offset_cases.append(
(
MonthBegin(),
{
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 2, 1): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1),
},
)
)
offset_cases.append(
(
MonthBegin(0),
{
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 12, 3): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1),
},
)
)
offset_cases.append(
(
MonthBegin(2),
{
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 12, 28): datetime(2008, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
MonthBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 5, 31): datetime(2008, 5, 1),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 1, 2): datetime(2006, 1, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
class TestMonthEnd(Base):
_offset = MonthEnd
def test_day_of_month(self):
dt = datetime(2007, 1, 1)
offset = MonthEnd()
result = dt + offset
assert result == Timestamp(2007, 1, 31)
result = result + offset
assert result == Timestamp(2007, 2, 28)
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + MonthEnd(normalize=True)
expected = dt.replace(hour=0) + MonthEnd()
assert result == expected
offset_cases = []
offset_cases.append(
(
MonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
MonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
},
)
)
offset_cases.append(
(
MonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
MonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestBMonthBegin(Base):
_offset = BMonthBegin
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthBegin()
offset2 = BMonthBegin()
assert not offset1 != offset2
offset_cases = []
offset_cases.append(
(
BMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 1): datetime(2006, 10, 2),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
BMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 10, 2): datetime(2006, 10, 2),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 15): datetime(2006, 10, 2),
},
)
)
offset_cases.append(
(
BMonthBegin(2),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 1, 15): datetime(2008, 3, 3),
datetime(2006, 12, 29): datetime(2007, 2, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
BMonthBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 2),
datetime(2008, 6, 1): datetime(2008, 5, 1),
datetime(2008, 3, 10): datetime(2008, 3, 3),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 30): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BMonthBegin(), datetime(2007, 12, 31), False),
(BMonthBegin(), datetime(2008, 1, 1), True),
(BMonthBegin(), datetime(2001, 4, 2), True),
(BMonthBegin(), datetime(2008, 3, 3), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestBMonthEnd(Base):
_offset = BMonthEnd
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + BMonthEnd(normalize=True)
expected = dt.replace(hour=0) + BMonthEnd()
assert result == expected
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthEnd()
offset2 = BMonthEnd()
assert not offset1 != offset2
offset_cases = []
offset_cases.append(
(
BMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
},
)
)
offset_cases.append(
(
BMonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BMonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
# --------------------------------------------------------------------
# Quarters
class TestQuarterBegin(Base):
def test_repr(self):
expected = "<QuarterBegin: startingMonth=3>"
assert repr(QuarterBegin()) == expected
expected = "<QuarterBegin: startingMonth=3>"
assert repr(QuarterBegin(startingMonth=3)) == expected
expected = "<QuarterBegin: startingMonth=1>"
assert repr(QuarterBegin(startingMonth=1)) == expected
def test_is_anchored(self):
assert QuarterBegin(startingMonth=1).is_anchored()
assert QuarterBegin().is_anchored()
assert not QuarterBegin(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
offset = QuarterBegin(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)
offset_cases = []
offset_cases.append(
(
QuarterBegin(startingMonth=1),
{
datetime(2007, 12, 1): datetime(2008, 1, 1),
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 1): datetime(2008, 7, 1),
},
)
)
offset_cases.append(
(
QuarterBegin(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 4, 30): datetime(2008, 5, 1),
},
)
)
offset_cases.append(
(
QuarterBegin(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 12, 1): datetime(2009, 1, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 30): datetime(2008, 7, 1),
},
)
)
offset_cases.append(
(
QuarterBegin(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2008, 4, 30): datetime(2008, 4, 1),
datetime(2008, 7, 1): datetime(2008, 4, 1),
},
)
)
offset_cases.append(
(
QuarterBegin(startingMonth=1, n=2),
{
datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 10, 1),
datetime(2008, 4, 1): datetime(2008, 10, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
class TestQuarterEnd(Base):
_offset = QuarterEnd
def test_repr(self):
expected = "<QuarterEnd: startingMonth=3>"
assert repr(QuarterEnd()) == expected
expected = "<QuarterEnd: startingMonth=3>"
assert repr(QuarterEnd(startingMonth=3)) == expected
expected = "<QuarterEnd: startingMonth=1>"
assert repr(QuarterEnd(startingMonth=1)) == expected
def test_is_anchored(self):
assert QuarterEnd(startingMonth=1).is_anchored()
assert QuarterEnd().is_anchored()
assert not QuarterEnd(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
offset = QuarterEnd(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)
offset_cases = []
offset_cases.append(
(
QuarterEnd(startingMonth=1),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),
},
)
)
offset_cases.append(
(
QuarterEnd(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 5, 31),
datetime(2008, 3, 31): datetime(2008, 5, 31),
datetime(2008, 4, 15): datetime(2008, 5, 31),
datetime(2008, 4, 30): datetime(2008, 5, 31),
},
)
)
offset_cases.append(
(
QuarterEnd(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),
},
)
)
offset_cases.append(
(
QuarterEnd(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),
datetime(2008, 7, 1): datetime(2008, 4, 30),
},
)
)
offset_cases.append(
(
QuarterEnd(startingMonth=1, n=2),
{
datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),
(QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
(QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestBQuarterBegin(Base):
_offset = BQuarterBegin
def test_repr(self):
expected = "<BusinessQuarterBegin: startingMonth=3>"
assert repr(BQuarterBegin()) == expected
expected = "<BusinessQuarterBegin: startingMonth=3>"
assert repr(BQuarterBegin(startingMonth=3)) == expected
expected = "<BusinessQuarterBegin: startingMonth=1>"
assert repr(BQuarterBegin(startingMonth=1)) == expected
def test_is_anchored(self):
assert BQuarterBegin(startingMonth=1).is_anchored()
assert BQuarterBegin().is_anchored()
assert not BQuarterBegin(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
offset = BQuarterBegin(n=-1, startingMonth=1)
assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)
offset_cases = []
offset_cases.append(
(
BQuarterBegin(startingMonth=1),
{
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2007, 3, 15): datetime(2007, 4, 2),
datetime(2007, 2, 28): datetime(2007, 4, 2),
datetime(2007, 1, 1): datetime(2007, 4, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 7, 2),
datetime(2008, 4, 30): datetime(2008, 7, 1),
},
)
)
offset_cases.append(
(
BQuarterBegin(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 8, 15): datetime(2008, 11, 3),
datetime(2008, 9, 15): datetime(2008, 11, 3),
datetime(2008, 11, 1): datetime(2008, 11, 3),
datetime(2008, 4, 30): datetime(2008, 5, 1),
},
)
)
offset_cases.append(
(
BQuarterBegin(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2007, 12, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 15): datetime(2008, 4, 1),
datetime(2008, 2, 27): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 4, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 2): datetime(2007, 7, 2),
},
)
)
offset_cases.append(
(
BQuarterBegin(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2007, 7, 3): datetime(2007, 7, 2),
datetime(2007, 4, 3): datetime(2007, 4, 2),
datetime(2007, 7, 2): datetime(2007, 4, 2),
datetime(2008, 4, 1): datetime(2008, 1, 1),
},
)
)
offset_cases.append(
(
BQuarterBegin(startingMonth=1, n=2),
{
datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 1, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2007, 3, 31): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 10, 1),
datetime(2008, 4, 30): datetime(2008, 10, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
class TestBQuarterEnd(Base):
_offset = BQuarterEnd
def test_repr(self):
expected = "<BusinessQuarterEnd: startingMonth=3>"
assert repr(BQuarterEnd()) == expected
expected = "<BusinessQuarterEnd: startingMonth=3>"
assert repr(BQuarterEnd(startingMonth=3)) == expected
expected = "<BusinessQuarterEnd: startingMonth=1>"
assert repr(BQuarterEnd(startingMonth=1)) == expected
def test_is_anchored(self):
assert BQuarterEnd(startingMonth=1).is_anchored()
assert BQuarterEnd().is_anchored()
assert not BQuarterEnd(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)
offset_cases = []
offset_cases.append(
(
BQuarterEnd(startingMonth=1),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),
},
)
)
offset_cases.append(
(
BQuarterEnd(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),
},
)
)
offset_cases.append(
(
BQuarterEnd(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),
},
)
)
offset_cases.append(
(
BQuarterEnd(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),
},
)
)
offset_cases.append(
(
BQuarterEnd(startingMonth=1, n=2),
{
datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
# --------------------------------------------------------------------
# Years
class TestYearBegin(Base):
_offset = YearBegin
def test_misspecified(self):
with pytest.raises(ValueError, match="Month must go from 1 to 12"):
YearBegin(month=13)
offset_cases = []
offset_cases.append(
(
YearBegin(),
{
datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(3),
{
datetime(2008, 1, 1): datetime(2011, 1, 1),
datetime(2008, 6, 30): datetime(2011, 1, 1),
datetime(2008, 12, 31): datetime(2011, 1, 1),
datetime(2005, 12, 30): datetime(2008, 1, 1),
datetime(2005, 12, 31): datetime(2008, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2007, 1, 15): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(-2),
{
datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(month=4),
{
datetime(2007, 4, 1): datetime(2008, 4, 1),
datetime(2007, 4, 15): datetime(2008, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1),
},
)
)
offset_cases.append(
(
YearBegin(0, month=4),
{
datetime(2007, 4, 1): datetime(2007, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1),
},
)
)
offset_cases.append(
(
YearBegin(4, month=4),
{
datetime(2007, 4, 1): datetime(2011, 4, 1),
datetime(2007, 4, 15): datetime(2011, 4, 1),
datetime(2007, 3, 1): datetime(2010, 4, 1),
datetime(2007, 12, 15): datetime(2011, 4, 1),
datetime(2012, 1, 31): datetime(2015, 4, 1),
},
)
)
offset_cases.append(
(
YearBegin(-1, month=4),
{
datetime(2007, 4, 1): datetime(2006, 4, 1),
datetime(2007, 3, 1): datetime(2006, 4, 1),
datetime(2007, 12, 15): datetime(2007, 4, 1),
datetime(2012, 1, 31): datetime(2011, 4, 1),
},
)
)
offset_cases.append(
(
YearBegin(-3, month=4),
{
datetime(2007, 4, 1): datetime(2004, 4, 1),
datetime(2007, 3, 1): datetime(2004, 4, 1),
datetime(2007, 12, 15): datetime(2005, 4, 1),
datetime(2012, 1, 31): datetime(2009, 4, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestYearEnd(Base):
_offset = YearEnd
def test_misspecified(self):
with pytest.raises(ValueError, match="Month must go from 1 to 12"):
YearEnd(month=13)
offset_cases = []
offset_cases.append(
(
YearEnd(),
{
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
YearEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
},
)
)
offset_cases.append(
(
YearEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
YearEnd(-2),
{
datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestYearEndDiffMonth(Base):
offset_cases = []
offset_cases.append(
(
YearEnd(month=3),
{
datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 15): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2009, 3, 31),
datetime(2008, 3, 30): datetime(2008, 3, 31),
datetime(2005, 3, 31): datetime(2006, 3, 31),
datetime(2006, 7, 30): datetime(2007, 3, 31),
},
)
)
offset_cases.append(
(
YearEnd(0, month=3),
{
datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 28): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2008, 3, 31),
datetime(2005, 3, 30): datetime(2005, 3, 31),
},
)
)
offset_cases.append(
(
YearEnd(-1, month=3),
{
datetime(2007, 1, 1): datetime(2006, 3, 31),
datetime(2008, 2, 28): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2007, 3, 31),
datetime(2006, 3, 29): datetime(2005, 3, 31),
datetime(2006, 3, 30): datetime(2005, 3, 31),
datetime(2007, 3, 1): datetime(2006, 3, 31),
},
)
)
offset_cases.append(
(
YearEnd(-2, month=3),
{
datetime(2007, 1, 1): datetime(2005, 3, 31),
datetime(2008, 6, 30): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2006, 3, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(YearEnd(month=3), datetime(2007, 3, 31), True),
(YearEnd(month=3), datetime(2008, 1, 1), False),
(YearEnd(month=3), datetime(2006, 3, 31), True),
(YearEnd(month=3), datetime(2006, 3, 29), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestBYearBegin(Base):
_offset = BYearBegin
def test_misspecified(self):
msg = "Month must go from 1 to 12"
with pytest.raises(ValueError, match=msg):
BYearBegin(month=13)
with pytest.raises(ValueError, match=msg):
BYearEnd(month=13)
offset_cases = []
offset_cases.append(
(
BYearBegin(),
{
datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2011, 1, 1): datetime(2011, 1, 3),
datetime(2011, 1, 3): datetime(2012, 1, 2),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2),
},
)
)
offset_cases.append(
(
BYearBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2),
},
)
)
offset_cases.append(
(
BYearBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 1, 2),
datetime(2009, 1, 4): datetime(2009, 1, 1),
datetime(2009, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 2),
datetime(2006, 12, 30): datetime(2006, 1, 2),
datetime(2006, 1, 1): datetime(2005, 1, 3),
},
)
)
offset_cases.append(
(
BYearBegin(-2),
{
datetime(2007, 1, 1): datetime(2005, 1, 3),
datetime(2007, 6, 30): datetime(2006, 1, 2),
datetime(2008, 12, 31): datetime(2007, 1, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
class TestBYearEnd(Base):
_offset = BYearEnd
offset_cases = []
offset_cases.append(
(
BYearEnd(),
{
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BYearEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BYearEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BYearEnd(-2),
{
datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestBYearEndLagged(Base):
_offset = BYearEnd
def test_bad_month_fail(self):
msg = "Month must go from 1 to 12"
with pytest.raises(ValueError, match=msg):
BYearEnd(month=13)
with pytest.raises(ValueError, match=msg):
BYearEnd(month=0)
offset_cases = []
offset_cases.append(
(
BYearEnd(month=6),
{
datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30),
},
)
)
offset_cases.append(
(
BYearEnd(n=-1, month=6),
{
datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
assert offset.rollforward(date) == datetime(2010, 6, 30)
assert offset.rollback(date) == datetime(2009, 6, 30)
on_offset_cases = [
(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
| bsd-3-clause |
carrillo/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
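# Illustrative sketch (hypothetical helper, not used by the tests above): a
# hedged example of querying partial dependence on a fitted regressor. Only the
# output shapes are asserted, since the pdp values depend on the fitted trees.
def _sketch_partial_dependence_usage():
    est = GradientBoostingRegressor(n_estimators=10, random_state=1)
    est.fit(boston.data, boston.target)
    pdp, axes = partial_dependence(est, [0], X=boston.data, grid_resolution=10)
    assert pdp.shape == (1, 10)     # one output, ten grid points
    assert axes[0].shape[0] == 10   # one axis of grid values for feature 0
    return pdp, axes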
| bsd-3-clause |
eickenberg/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/datasets/samples_generator.py | 103 | 56423 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
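# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of the generator above. Only shapes and the label
# range are asserted, since the exact values depend on the random draws.
def _example_make_classification():
    X, y = make_classification(n_samples=100, n_features=20, n_informative=2,
                               n_redundant=2, n_classes=2, random_state=0)
    assert X.shape == (100, 20) and y.shape == (100,)
    assert set(np.unique(y)) <= {0, 1}
    return X, y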
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
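# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of the multilabel generator above. With the default
# dense indicator format, Y is a binary matrix with one column per class.
def _example_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=20,
                                          n_classes=5, random_state=0)
    assert X.shape == (50, 20) and Y.shape == (50, 5)
    assert set(np.unique(Y)) <= {0, 1}
    return X, Y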
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
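# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of the Hastie 10.2 data above. The target simply
# reproduces the squared-norm thresholding rule at 9.34 with labels in {-1, 1}.
def _example_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    assert X.shape == (100, 10)
    assert set(np.unique(y)) <= {-1.0, 1.0}
    assert np.array_equal(y, np.where((X ** 2.0).sum(axis=1) > 9.34, 1.0, -1.0))
    return X, y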
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
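# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_regression with coef=True. When noise=0.0,
# the targets are (up to floating point) an exact linear function of X plus the
# bias, even after the internal feature shuffling.
def _example_make_regression():
    X, y, w = make_regression(n_samples=50, n_features=10, n_informative=3,
                              bias=2.0, noise=0.0, coef=True, random_state=0)
    assert X.shape == (50, 10) and y.shape == (50,) and w.shape == (10,)
    assert np.allclose(y, np.dot(X, w) + 2.0)
    return X, y, w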
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
    shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
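# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_circles without noise. Class 0 lies on the
# unit circle and class 1 on the inner circle scaled by `factor`.
def _example_make_circles():
    X, y = make_circles(n_samples=100, shuffle=False, noise=None, factor=0.5)
    radii = np.sqrt((X ** 2).sum(axis=1))
    assert np.allclose(radii[y == 0], 1.0)
    assert np.allclose(radii[y == 1], 0.5)
    return X, y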
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
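# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_moons without noise. Both half circles have
# unit radius, so no coordinate exceeds 2 in absolute value.
def _example_make_moons():
    X, y = make_moons(n_samples=100, shuffle=False, noise=None)
    assert X.shape == (100, 2) and set(np.unique(y)) == {0, 1}
    assert np.abs(X).max() <= 2.0
    return X, y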
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
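# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_friedman1 with noise=0.0. The targets
# reproduce the closed-form Friedman #1 formula from the first five features.
def _example_make_friedman1():
    X, y = make_friedman1(n_samples=50, n_features=10, noise=0.0,
                          random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)
    return X, y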
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
    # Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
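# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_low_rank_matrix. Most of the spectral mass
# should be concentrated in roughly the first `effective_rank` singular values;
# the 0.5 threshold below is a deliberately loose sanity check, not a bound
# stated by the generator.
def _example_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = linalg.svd(X, compute_uv=False)
    assert X.shape == (100, 50)
    assert s[:10].sum() > 0.5 * s.sum()
    return X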
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
    n_components : int
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
    random_state : int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
    data : array of shape [n_features, n_samples]
The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
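# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_sparse_coded_signal. The returned signal
# satisfies Y = D X, and each code column has exactly n_nonzero_coefs nonzeros
# (randn draws are nonzero in practice).
def _example_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=20, n_components=15,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert Y.shape == (10, 20) and D.shape == (10, 15) and X.shape == (15, 20)
    assert np.allclose(Y, np.dot(D, X))
    assert np.all((X != 0).sum(axis=0) == 3)
    return Y, D, X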
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
    dim : integer, optional (default=1)
The size of the random matrix to generate.
    alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
    prec : array of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
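# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_sparse_spd_matrix. The result is symmetric
# and positive definite, so all eigenvalues are strictly positive.
def _example_make_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=10, alpha=0.95, random_state=0)
    assert prec.shape == (10, 10)
    assert np.allclose(prec, prec.T)
    assert np.all(linalg.eigvalsh(prec) > 0)
    return prec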
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
        The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
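# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_swiss_roll without noise. Since x = t*cos(t)
# and z = t*sin(t), the in-plane radius sqrt(x**2 + z**2) recovers t.
def _example_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=100, noise=0.0, random_state=0)
    assert X.shape == (100, 3) and t.shape == (100,)
    assert np.allclose(np.sqrt(X[:, 0] ** 2 + X[:, 2] ** 2), t)
    return X, t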
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
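# Illustrative usage sketch (hypothetical helper, not called anywhere in this
# module): a hedged example of make_gaussian_quantiles. When n_samples divides
# evenly by n_classes, the quantile classes are exactly equally sized.
def _example_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (90, 2)
    assert np.array_equal(np.bincount(y), np.array([30, 30, 30]))
    return X, y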
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
jviada/QuantEcon.py | examples/perm_inc_figs.py | 7 | 1538 | """
Plots consumption, income and debt for the simple infinite horizon LQ
permanent income model with Gaussian iid income.
"""
import random
import numpy as np
import matplotlib.pyplot as plt
r = 0.05
beta = 1 / (1 + r)
T = 60
sigma = 0.15
mu = 1
def time_path():
w = np.random.randn(T+1) # w_0, w_1, ..., w_T
w[0] = 0
b = np.zeros(T+1)
for t in range(1, T+1):
b[t] = w[1:t].sum()
b = - sigma * b
c = mu + (1 - beta) * (sigma * w - b)
return w, b, c
# == Figure showing a typical realization == #
if 1:
fig, ax = plt.subplots()
p_args = {'lw': 2, 'alpha': 0.7}
ax.grid()
ax.set_xlabel(r'Time')
bbox = (0., 1.02, 1., .102)
legend_args = {'bbox_to_anchor': bbox, 'loc': 'upper left',
'mode': 'expand'}
w, b, c = time_path()
ax.plot(list(range(T+1)), mu + sigma * w, 'g-',
label="non-financial income", **p_args)
ax.plot(list(range(T+1)), c, 'k-', label="consumption", **p_args)
ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args)
ax.legend(ncol=3, **legend_args)
plt.show()
# == Figure showing multiple consumption paths == #
if 0:
fig, ax = plt.subplots()
p_args = {'lw': 0.8, 'alpha': 0.7}
ax.grid()
ax.set_xlabel(r'Time')
ax.set_ylabel(r'Consumption')
b_sum = np.zeros(T+1)
for i in range(250):
rcolor = random.choice(('c', 'g', 'b', 'k'))
w, b, c = time_path()
ax.plot(list(range(T+1)), c, color=rcolor, **p_args)
plt.show()
| bsd-3-clause |
RyanChinSang/ECNG3020-ORSS4SCVI | BETA/dev01/ORSSd01-c.py | 1 | 7756 | import os
import cv2
import queue
import pyttsx3
import webcolors
import threading
import matplotlib
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import speech_recognition as sr
import matplotlib.patches as patches
from PyQt5 import QtWidgets, QtGui
from BETA.dev01.models.VideoStream import VideoStream
from BETA.dev01.models.SpchRecg import internet, google_sr, sphinx_sr
from BETA.dev01.utils import visualization_utils as vis_util
from BETA.dev01.utils import label_map_util
def s2t_listen():
global s2t_string
with voice_lock:
# if internet():
# with sr.Microphone(device_index=0) as source:
# audio = sr.Recognizer().listen(source)
# s2t_string = google_sr(audio)
# else:
# pass
with sr.Microphone(device_index=0) as source:
# print("Say something!")
audio = sr.Recognizer().listen(source)
if internet():
# TODO: thread this?
s2t_string = google_sr(audio)
else:
s2t_string = sphinx_sr(audio)
def t2s_say(word):
t2s_engine.say(word)
try:
t2s_engine.runAndWait()
except RuntimeError:
pass
def avg_color():
frame = cap.read()[1]
rgb = np.array([])
for x in range(size * 2):
for y in range(size * 2):
if (x, y) == (0, 0):
rgb = np.array([frame[int((frame_height / 2) - size) + x][
int((frame_width / 2) - size) + y]])
else:
rgb = np.append(rgb, [frame[int((frame_height / 2) - size) + x][
int((frame_width / 2) - size) + y]], axis=0)
requested_color = rgb.mean(axis=0)
min_colors = {}
for key, name in webcolors.css3_hex_to_names.items():
r_c, g_c, b_c = webcolors.hex_to_rgb(key)
rd = (r_c - requested_color[2]) ** 2
gd = (g_c - requested_color[1]) ** 2
bd = (b_c - requested_color[0]) ** 2
min_colors[(rd + gd + bd)] = name
closest_color = str(min_colors[min(min_colors.keys())])
print(closest_color)
threading.Thread(target=t2s_say, args=(closest_color,), daemon=True).start()
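# avg_color() above averages the BGR pixels in the central (2*size x 2*size)
# window of the frame and names the result by the CSS3 colour minimising the
# squared per-channel distance; requested_color is in BGR order, hence the
# reversed indices [2], [1], [0] when compared against each CSS3 (R, G, B).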
def close_handler(event):
cap.release()
plt.close('all')
def active_handler(event):
global s2t_string
if s2t_string == 'color':
# print('Colour ID function goes here (threaded)')
threading.Thread(target=avg_color, args=(), daemon=True).start()
s2t_string = None
elif s2t_string == 'quit':
close_handler(event)
else:
threading.Thread(target=s2t_listen, args=(), daemon=True).start()
# threading.Thread(target=pprint, args=(s2t_string, ), daemon=True).start()
# s2t_string = threading.Thread(target=s2t_listen, args=(), daemon=True).start()
# print(s2t_string)
def key_press_handler(event):
print(event.key)
def configure(frame, version):
matplotlib.rcParams['figure.subplot.bottom'] = '0.0'
matplotlib.rcParams['figure.subplot.left'] = '0.0'
matplotlib.rcParams['figure.subplot.top'] = '1.0'
matplotlib.rcParams['figure.subplot.right'] = '1.0'
matplotlib.rcParams['figure.subplot.right'] = '1.0'
matplotlib.rcParams["figure.figsize"] = [(frame.shape[1] / int(matplotlib.rcParams['figure.dpi'])),
((frame.shape[0] - (48 + 20)) / int(matplotlib.rcParams['figure.dpi']))]
for key in matplotlib.rcParams:
if 'keymap' in key:
print(key + ": " + str(matplotlib.rcParams.get(key)))
plt.switch_backend('Qt5Agg')
plt.get_current_fig_manager().canvas.mpl_connect('key_press_event', key_press_handler)
plt.get_current_fig_manager().canvas.mpl_connect('draw_event', active_handler)
plt.get_current_fig_manager().canvas.mpl_connect('close_event', close_handler)
plt.get_current_fig_manager().canvas.set_window_title('ORSS4SCVI ' + version)
plt.get_current_fig_manager().canvas.manager.window.findChild(QtWidgets.QToolBar).setVisible(False)
plt.get_current_fig_manager().canvas.manager.window.statusBar().setVisible(False)
plt.get_current_fig_manager().window.setWindowIcon(QtGui.QIcon(PATH_TO_ICON))
plt.axis('off')
avg_fps = np.array([])
size = 20
s2t_string = None
q = queue.Queue()
voice_lock = threading.Lock()
print_lock = threading.Lock()
t2s_engine = pyttsx3.init()
t2s_engine.setProperty('voice', t2s_engine.getProperty('voices')[1].__dict__.get('id'))
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
PATH_TO_ICON = os.path.dirname(__file__) + '/static/icons/icon.ico'
NUM_CLASSES = 90
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
detection_graph = tf.Graph()
cap = VideoStream(src=1, height=360, ratio=(16/9)).start()
init_frame = cap.read()[1]
frame_height, frame_width = init_frame.shape[:2]
freq = cv2.getTickFrequency()
configure(frame=init_frame, version='dev0.1a')
fig = plt.gcf()
ax = plt.gca()
img = plt.imshow(cv2.cvtColor(init_frame, cv2.COLOR_BGR2RGB), aspect='auto')
rect = patches.Rectangle(xy=(int((frame_width / 2) - size),
int((frame_height / 2) - size)), # Top-left point
width=int(size * 2),
height=int(size * 2),
linewidth=1,
edgecolor='white',
facecolor='none')
ax.add_patch(rect)
t = ax.text(5, 15, '{:.2f}'.format(0), color='white')
with detection_graph.as_default():
# Load a frozen Tensorflow model into memory.
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Detection
with tf.Session(graph=detection_graph) as sess:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
while cap.isOpened():
s = cv2.getTickCount()
_, image_np = cap.read()
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=2)
img.set_data(cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB))
if plt.get_fignums():
fig.canvas.draw()
fig.canvas.flush_events()
plt.show(block=False)
f = cv2.getTickCount()
avg_fps = np.append(avg_fps, (freq / (f - s)))
t.set_text('{:.2f}'.format(avg_fps[-1]))
| gpl-3.0 |
lebedov/nseindia_reformat | analyze.py | 1 | 10517 | #!/usr/bin/env python
"""
Analyze parsed Indian National Stock Exchange trades data.
"""
import csv, datetime, sys
import numpy as np
import pandas
def rount_ceil_minute(d):
"""
Round to nearest minute after the specified time.
Parameters
----------
d : datetime.datetime
A time expressed using the `datetime.datetime` class.
"""
return d-datetime.timedelta(seconds=d.second,
microseconds=d.microsecond,
                                minutes=(-1 if d.second != 0 or d.microsecond != 0 else 0))
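# Illustrative behaviour of the helper above (not executed):
#     rount_ceil_minute(datetime.datetime(2012, 9, 3, 10, 15, 30))
#         -> datetime.datetime(2012, 9, 3, 10, 16)
#     rount_ceil_minute(datetime.datetime(2012, 9, 3, 10, 15))
#         -> datetime.datetime(2012, 9, 3, 10, 15)   # already on a boundary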
def sample(df, delta, date_time_col, *data_cols):
"""
Sample data at a specific sampling time interval.
Return a table of data containing at least two columns; one column contains times
separated by the specified sampling interval, while the others contain the
data points associated with the most recent times in the original table
prior to each successive sampling time.
Parameters
----------
df : pandas.DataFrame
Data to sample. Must contain columns with the names in `data_cols` and
`date_time_col`.
delta : datetime.timedelta
Sampling interval.
date_time_col : str
Name of date/time.
data_cols : tuple of str
Name(s) of data column.
Returns
-------
out : pandas.DataFrame
DataFrame containing column of resampled data points and sampling times.
Notes
-----
Sampling begins at 9:15 AM and ends at 3:30 PM.
"""
result_dict = {date_time_col: []}
for col in data_cols:
result_dict[col] = []
# if len(df) == 0:
# return result_dict
temp = df.irow(0)[date_time_col]
t_start = datetime.datetime(temp.year, temp.month, temp.day, 9, 15, 0, 0)
t_end = datetime.datetime(temp.year, temp.month, temp.day, 15, 30, 0, 0)
data_dict_last = {}
for col in data_cols:
data_dict_last[col] = df.irow(0)[col]
t = t_start
i = 0
while t <= t_end:
# Save the sample time:
result_dict[date_time_col].append(t)
# Store the data associated with the first event as the sampled value
# between 9:15 AM and the time of first event when the
# latter occurs later than the former:
if t >= df[date_time_col].min():
# Only update the data point stored at each time point when
# a time point in the original series is passed:
while i < len(df) and t >= df.irow(i)[date_time_col]:
for col in data_cols:
data_dict_last[col] = df.irow(i)[col]
i += 1
for col in data_cols:
result_dict[col].append(data_dict_last[col])
t += delta
return pandas.DataFrame(result_dict)
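# Illustrative call (mirrors the grouped call in analyze() below; `day_df`
# stands for a single day's slice of the trade DataFrame):
#
#     sampled = sample(day_df, datetime.timedelta(minutes=3),
#                      'trade_date_time', 'trade_price', 'trade_date')
#
# `sampled` then holds one row per 3-minute tick between 09:15 and 15:30,
# carrying the most recent trade_price/trade_date seen at or before each tick
# (ticks earlier than the first trade carry the first trade's values).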
def analyze(file_name):
"""
Analyze parsed India data in specified file name.
Parameters
----------
file_name : str
Name of CSV file containing parsed India data.
Returns
-------
output : list
Results of analysis. These include the following (in order):
number of trades with quantity below Q1
number of trades with quantity below Q2
number of trades with quantity below Q3
maximum trade quantity
minimum trade quantity
mean trade quantity
number of trades with interarrival times below Q1
number of trades with interarrival times below Q2
number of trades with interarrival times below Q3
maximum daily trade volume
minimum daily trade volume
mean daily trade volume
median daily trade volume
mean trade price over entire month
standard deviation of trade price sampled over 3 minutes in bps
mean returns sampled over 3 minutes in bps
standard deviation of returns sampled over 3 minutes in bps
sum of absolute values of returns sampled over 3 minutes in bps
maximum daily price in bps (for each business day of 9/2012)
minimum daily price in bps (for each business day of 9/2012)
"""
df = pandas.read_csv(file_name, header=None,
names=['record_indicator',
'segment',
'trade_number',
'trade_date',
'trade_time',
'symbol',
'instrument',
'expiry_date',
'strike_price',
'option_type',
'trade_price',
'trade_quantity',
'buy_order_num',
'buy_algo_ind',
'buy_client_id_flag',
'sell_order_num',
'sell_algo_ind',
'sell_client_id_flag'])
# Find quartiles of number of trades:
trade_quant_q1 = df['trade_quantity'].quantile(0.25)
trade_quant_q2 = df['trade_quantity'].quantile(0.50)
trade_quant_q3 = df['trade_quantity'].quantile(0.75)
# Trade quantity stats:
max_trade_quant = df['trade_quantity'].max()
min_trade_quant = df['trade_quantity'].min()
mean_trade_quant = df['trade_quantity'].mean()
# Convert trade date/times to datetime.timedelta and join the column to the
# original data:
s_trade_date_time = \
df[['trade_date', 'trade_time']].apply(lambda x: \
datetime.datetime.strptime(x[0] + ' ' + x[1], '%m/%d/%Y %H:%M:%S.%f'),
axis=1)
s_trade_date_time.name = 'trade_date_time'
df = df.join(s_trade_date_time)
# Compute trade interarrival times for each day (i.e., the interval between
    # the last trade on one day and the first trade on the following day should
# not be regarded as an interarrival time). Note that this returns the times
# in nanoseconds:
s_inter_time = df.groupby('trade_date')['trade_date_time'].apply(lambda x: x.diff())
# Exclude the NaNs that result because of the application of the diff()
# method to each group of trade times:
s_inter_time = s_inter_time[s_inter_time.notnull()]
if len(s_inter_time) > 0:
# Convert interarrival times from nanoseconds to seconds:
s_inter_time = s_inter_time.apply(lambda x: x*10**-9)
# Find interarrival time quartiles:
inter_time_q1 = s_inter_time.quantile(0.25)
inter_time_q2 = s_inter_time.quantile(0.50)
inter_time_q3 = s_inter_time.quantile(0.75)
else:
# If there are not enough trades per day to compute interarrival times,
# set the number of times to 0 for each quantile:
inter_time_q1 = 0
inter_time_q2 = 0
inter_time_q3 = 0
# Compute the daily traded volume:
s_daily_vol = \
df.groupby('trade_date')['trade_quantity'].apply(sum)
df_daily_vol = pandas.DataFrame({'trade_date': s_daily_vol.index,
'trade_quantity': s_daily_vol.values})
# Set the number of trades for days on which no trades were recorded to 0:
sept_days = map(lambda d: '09/%02i/2012' % d,
[3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 17, 18, 19, 20, 21, 24,
25, 26, 27, 28])
df_daily_vol = \
df_daily_vol.combine_first(pandas.DataFrame({'trade_date': sept_days,
'trade_quantity': np.zeros(len(sept_days))}))
# Compute daily volume stats:
max_daily_vol = df_daily_vol['trade_quantity'].max()
min_daily_vol = df_daily_vol['trade_quantity'].min()
mean_daily_vol = df_daily_vol['trade_quantity'].mean()
median_daily_vol = df_daily_vol['trade_quantity'].median()
# Sample trade prices every 3 minutes for each day of the month and combine
# into a single DataFrame:
# XXX need to handle empty groups
df_trade_price_res = \
df.groupby('trade_date').apply(lambda d: \
sample(d, datetime.timedelta(minutes=3),
'trade_date_time', 'trade_price', 'trade_date'))
# Compute the average price trade for the entire month of data:
mean_trade_price = df['trade_price'].mean()
# Compute standard deviation of sampled prices:
std_trade_price_res = df_trade_price_res['trade_price'].std()
# Compute returns:
s_returns = \
df_trade_price_res.groupby('trade_date').apply( \
lambda x: x['trade_price'].diff()/x['trade_price'])
s_returns = s_returns[s_returns.notnull()]
if len(s_returns) > 0:
# Compute average and standard deviation of returns in bps:
mean_returns = s_returns.mean()*10000
std_returns = s_returns.std()*10000
sum_abs_returns = sum(abs(s_returns))*10000
else:
# If there are too few trades with which to compute returns, set the
# return statistics to 0:
mean_returns = 0.0
std_returns = 0.0
sum_abs_returns = 0.0
# Set the trade price for days on which no trades were recorded to 0:
df_price = df[['trade_date', 'trade_price']]
df_price = df_price.combine_first(pandas.DataFrame({'trade_date': sept_days,
'trade_price': np.zeros(len(sept_days))}))
# Compute the daily maximum and minimum trade price expressed in basis
# points away from the daily opening price:
daily_price_max_list = map(lambda x: 10000*int(x),
df_price.groupby('trade_date')['trade_price'].apply(max)-df.ix[0]['trade_price'])
daily_price_min_list = map(lambda x: 10000*int(x),
df_price.groupby('trade_date')['trade_price'].apply(min)-df.ix[0]['trade_price'])
return [trade_quant_q1, trade_quant_q2, trade_quant_q3,
max_trade_quant, min_trade_quant, mean_trade_quant,
inter_time_q1, inter_time_q2, inter_time_q3,
max_daily_vol, min_daily_vol, mean_daily_vol,
median_daily_vol, mean_trade_price, std_trade_price_res,
mean_returns, std_returns, sum_abs_returns] + daily_price_max_list + daily_price_min_list
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'need to specify input files'
sys.exit(0)
w = csv.writer(sys.stdout)
for file_name in sys.argv[1:]:
row = analyze(file_name)
w.writerow([file_name] + row)
| bsd-3-clause |
appapantula/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
PanDAWMS/panda-server | pandaserver/taskbuffer/ConBridge.py | 1 | 18674 | import os
import sys
import time
import types
import socket
import signal
import random
import threading
import traceback
try:
import cPickle as pickle
except ImportError:
import pickle
from pandaserver.taskbuffer import OraDBProxy as DBProxy
from pandaserver.config import panda_config
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec
from pandaserver.taskbuffer.DatasetSpec import DatasetSpec
from pandacommon.pandalogger.PandaLogger import PandaLogger
try:
long
except NameError:
long = int
# logger
_logger = PandaLogger().getLogger('ConBridge')
# exception for normal termination
class HarmlessEx(Exception):
pass
# terminate child process by itself when master has gone
class Terminator (threading.Thread):
# constructor
def __init__(self,consock):
threading.Thread.__init__(self)
self.consock = consock
# main
def run(self):
# watching control socket
try:
rcvSize = self.consock.recv(1)
except Exception:
pass
# get PID
pid = os.getpid()
_logger.debug("child %s received termination" % pid)
# kill
try:
os.kill(pid,signal.SIGTERM)
except Exception:
pass
try:
os.kill(pid,signal.SIGKILL)
except Exception:
pass
# connection bridge with timeout
class ConBridge (object):
# constructor
def __init__(self):
self.child_pid = 0
self.isMaster = False
self.mysock = None
self.consock = None
self.pid = os.getpid()
# timeout
if hasattr(panda_config,'dbtimeout'):
self.timeout = int(panda_config.dbtimeout)
else:
self.timeout = 600
# verbose
if hasattr(panda_config,'dbbridgeverbose'):
self.verbose = panda_config.dbbridgeverbose
else:
self.verbose = False
# destructor
def __del__(self):
# kill old child process
self.bridge_killChild()
# connect
def connect(self,dbhost=panda_config.dbhost,dbpasswd=panda_config.dbpasswd,
dbuser=panda_config.dbuser,dbname=panda_config.dbname,
dbtimeout=None,reconnect=False):
# kill old child process
self.bridge_killChild()
_logger.debug('master %s connecting' % self.pid)
# reset child PID and sockets
self.child_pid = 0
self.mysock = None
self.consock = None
# create socket
datpair = socket.socketpair()
conpair = socket.socketpair()
# fork
self.child_pid = os.fork()
if self.child_pid == 0:
# child
self.isMaster = False
self.pid = os.getpid()
# keep socket
self.mysock = datpair[1]
self.consock = conpair[1]
datpair[0].close()
conpair[0].close()
# connect to database
_logger.debug('child %s connecting to database' % self.pid)
self.proxy = DBProxy.DBProxy()
if not self.proxy.connect(dbhost=dbhost,dbpasswd=dbpasswd,dbtimeout=60):
_logger.error('child %s failed to connect' % self.pid)
# send error
self.bridge_sendError((RuntimeError,'child %s connection failed' % self.pid))
# exit
self.bridge_childExit()
# send OK just for ACK
_logger.debug('child %s connection is ready' % self.pid)
self.bridge_sendResponse(None)
# start terminator
Terminator(self.consock).start()
# go main loop
_logger.debug('child %s going into the main loop' % self.pid)
self.bridge_run()
# exit
self.bridge_childExit(0)
else:
# master
self.isMaster = True
# keep socket
self.mysock = datpair[0]
self.consock = conpair[0]
datpair[1].close()
conpair[1].close()
try:
# get ACK
_logger.debug('master %s waiting ack from child=%s' % (self.pid,self.child_pid))
self.bridge_getResponse()
_logger.debug('master %s got ready from child=%s' % (self.pid,self.child_pid))
return True
except Exception as e:
_logger.error('master %s failed to setup child=%s : %s %s' % \
(self.pid, self.child_pid, str(e), traceback.format_exc()))
# kill child
self.bridge_killChild()
return False
#######################
# communication methods
# send packet
def bridge_send(self,val):
try:
# set timeout
if self.isMaster:
self.mysock.settimeout(self.timeout)
# serialize
tmpStr = pickle.dumps(val, protocol=0)
# send size
sizeStr = "%50s" % len(tmpStr)
self.mysock.sendall(sizeStr.encode())
# send body
self.mysock.sendall(tmpStr)
# set timeout back
if self.isMaster:
self.mysock.settimeout(None)
except Exception as e:
if self.isMaster:
roleType = 'master'
else:
roleType = 'child '
_logger.error('%s %s send error : val=%s - %s %s' % (roleType, self.pid, str(val)[:1024], str(e),
traceback.format_exc()))
# terminate child
if not self.isMaster:
self.bridge_childExit()
raise e
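    # Wire format shared by bridge_send/bridge_recv (sketch): every packet is
    # a 50-character, space-padded ASCII header built with "%50s" % len(body),
    # followed by body = pickle.dumps(value, protocol=0). The receiver loops
    # until all 50 header bytes have arrived, then loops again until len(body)
    # bytes have been read before unpickling.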
# receive packet
def bridge_recv(self):
try:
# set timeout
if self.isMaster:
self.mysock.settimeout(self.timeout)
# get size
strSize = None
headSize = 50
while strSize is None or len(strSize) < headSize:
if strSize is None:
tmpSize = headSize
else:
tmpSize = headSize - len(strSize)
tmpStr = self.mysock.recv(tmpSize)
if len(tmpStr) == 0:
if self.isMaster:
raise socket.error('empty packet')
else:
# master closed socket
raise HarmlessEx('empty packet')
if strSize is None:
strSize = tmpStr
else:
strSize += tmpStr
if strSize is None:
strSize = ''
else:
strSize = strSize.decode()
# get body
strBody = None
bodySize = long(strSize)
while strBody is None or len(strBody) < bodySize:
if strBody is None:
tmpSize = bodySize
else:
tmpSize = bodySize - len(strBody)
tmpStr = self.mysock.recv(tmpSize)
if len(tmpStr) == 0:
if self.isMaster:
raise socket.error('empty packet')
else:
# master closed socket
raise HarmlessEx('empty packet')
if strBody is None:
strBody = tmpStr
else:
strBody += tmpStr
if strBody is None:
strBody = ''.encode()
# set timeout back
if self.isMaster:
self.mysock.settimeout(None)
# deserialize
retVal = pickle.loads(strBody)
return True,retVal
except Exception as e:
if self.isMaster:
roleType = 'master'
else:
roleType = 'child '
if type(e) == HarmlessEx:
_logger.debug('%s %s recv harmless ex : %s' % \
(roleType, self.pid, str(e)))
else:
_logger.error('%s %s recv error : %s %s' % \
(roleType, self.pid, str(e),
traceback.format_exc()))
# terminate child
if not self.isMaster:
self.bridge_childExit()
raise e
#######################
# child's methods
# send error
def bridge_sendError(self,val):
# send status
self.bridge_send("NG")
# check if pickle-able
try:
pickle.dumps(val, protocol=0)
except Exception:
# use RuntimeError
val = (RuntimeError,str(val[-1]))
# send exceptions
self.bridge_send(val)
# send response
def bridge_sendResponse(self,val):
# send status
self.bridge_send("OK")
# send response
self.bridge_send(val)
# termination of child
def bridge_childExit(self,exitCode=1):
if not self.isMaster:
_logger.debug("child %s closing sockets" % self.pid)
# close sockets
try:
self.mysock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
self.consock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
# exit
_logger.debug("child %s going to exit" % self.pid)
os._exit(exitCode)
# child main
def bridge_run(self):
comStr = ''
while True:
try:
# get command
status,comStr = self.bridge_recv()
if not status:
raise RuntimeError('invalid command')
# get variables
status,variables = self.bridge_recv()
if not status:
raise RuntimeError('invalid variables')
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error('child %s died : %s %s' % (self.pid,errType,errValue))
# exit
self.bridge_childExit()
if self.verbose:
_logger.debug('child %s method %s executing' % (self.pid,comStr))
try:
# execute
method = getattr(self.proxy,comStr)
res = method(*variables[0], **variables[1])
                # FIXME : modify response since cx_Oracle types cannot be pickled
if comStr in ['querySQLS']:
newRes = [True]+res[1:]
res = newRes
if self.verbose:
_logger.debug('child %s method %s completed' % (self.pid,comStr))
# return
self.bridge_sendResponse((res,variables[0],variables[1]))
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error('child %s method %s failed : %s %s' % (self.pid,comStr,errType,errValue))
if errType in [socket.error,socket.timeout]:
_logger.error('child %s died : %s %s' % (self.pid,errType,errValue))
# exit
self.bridge_childExit()
# send error
self.bridge_sendError((errType,errValue))
#######################
# master's methods
# kill child
def bridge_killChild(self):
# kill old child process
if self.child_pid != 0:
# close sockets
_logger.debug('master %s closing sockets for child=%s' % (self.pid,self.child_pid))
try:
if self.mysock is not None:
self.mysock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
if self.consock is not None:
self.consock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
_logger.debug('master %s killing child=%s' % (self.pid,self.child_pid))
# send SIGTERM
try:
os.kill(self.child_pid,signal.SIGTERM)
except Exception:
pass
time.sleep(2)
# send SIGKILL
try:
os.kill(self.child_pid,signal.SIGKILL)
except Exception:
pass
# wait for completion of child
_logger.debug('master %s waiting child=%s' % (self.pid,self.child_pid))
try:
os.waitpid(self.child_pid,0)
except Exception:
pass
# sleep to avoid burst reconnection
time.sleep(random.randint(5,15))
_logger.debug('master %s killed child=%s' % (self.pid,self.child_pid))
    # get response
def bridge_getResponse(self):
# get status
status,strStatus = self.bridge_recv()
if not status:
raise RuntimeError('master %s got invalid status response from child=%s' % \
(self.pid,self.child_pid))
if strStatus == 'OK':
# return res
status,ret = self.bridge_recv()
if not status:
raise RuntimeError('master %s got invalid response body from child=%s' % \
(self.pid,self.child_pid))
return ret
elif strStatus == 'NG':
# raise error
status,ret = self.bridge_recv()
if not status:
raise RuntimeError('master %s got invalid response value from child=%s' % \
(self.pid,self.child_pid))
raise ret[0](ret[1])
else:
raise RuntimeError('master %s got invalid response from child=%s : %s' % \
(self.pid,self.child_pid,str(strStatus)))
# method wrapper class
class bridge_masterMethod:
# constructor
def __init__(self,name,parent):
self.name = name
self.parent = parent
self.pid = os.getpid()
# copy changes in taskbuff objects to master
def copyTbObjChanges(self,oldPar,newPar):
# check they have the same type
if type(oldPar) != type(newPar):
return False
# copy some Specs since they are passed via ref's
if isinstance(oldPar,JobSpec) or isinstance(oldPar,FileSpec) \
or isinstance(oldPar,DatasetSpec):
if hasattr(oldPar,'__getstate__'):
tmpStat = newPar.__getstate__()
oldPar.__setstate__(tmpStat)
else:
tmpStat = newPar.values()
oldPar.pack(tmpStat)
return True
# copy Datasets
return False
# copy changes in objects to master
def copyChanges(self,oldPar,newPar):
if isinstance(oldPar, list):
# delete all elements first
while len(oldPar) > 0:
oldPar.pop()
# append
for tmpItem in newPar:
oldPar.append(tmpItem)
elif isinstance(oldPar, dict):
# replace
for tmpKey in newPar:
oldPar[tmpKey] = newPar[tmpKey]
else:
self.copyTbObjChanges(oldPar,newPar)
# method emulation
def __call__(self,*args,**keywords):
while True:
try:
# send command name
self.parent.bridge_send(self.name)
# send variables
self.parent.bridge_send((args,keywords))
# get response
retVal,newArgs,newKeywords = self.parent.bridge_getResponse()
# propagate child's changes in args to master
for idxArg,tmpArg in enumerate(args):
self.copyChanges(tmpArg,newArgs[idxArg])
# propagate child's changes in keywords to master
for tmpKey in keywords:
tmpArg = keywords[tmpKey]
self.copyChanges(tmpArg,newKeywords[tmpKey])
# return
return retVal
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error('master %s method %s failed : %s %s' % \
(self.pid,self.name,errType,errValue))
# reconnect when socket has a problem
if errType not in [socket.error,socket.timeout]:
# kill old child process
self.parent.bridge_killChild()
_logger.error('master %s killed child' % self.pid)
#raise errType,errValue
# sleep
time.sleep(5)
# reconnect
try:
_logger.debug('master %s trying to reconnect' % self.pid)
is_ok = self.parent.connect()
if is_ok:
_logger.debug('master %s reconnect completed' % self.pid)
else:
_logger.debug('master %s reconnect failed. sleep' % self.pid)
time.sleep(120)
except Exception:
_logger.error('master %s connect failed. sleep' % self.pid)
time.sleep(120)
    # get attr for cursor attributes
def __getattribute__(self,name):
if object.__getattribute__(self,'isMaster'):
try:
                # return original attribute
return object.__getattribute__(self,name)
except Exception:
# append methods
if not name.startswith('_') and hasattr(DBProxy.DBProxy,name) and \
isinstance(getattr(DBProxy.DBProxy,name),
(types.MethodType, types.FunctionType)):
# get DBProxy's method wrapper
method = ConBridge.bridge_masterMethod(name,self)
# set method
setattr(self,name,method)
# return
return method
        # return original attribute for child
return object.__getattribute__(self,name)
| apache-2.0 |
mschmidt87/nest-simulator | examples/nest/Potjans_2014/spike_analysis.py | 15 | 6288 | # -*- coding: utf-8 -*-
#
# spike_analysis.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Merges spike files, produces raster plots, calculates and plots firing rates
import numpy as np
import glob
import matplotlib.pyplot as plt
import os
datapath = '../data'
# get simulation time and numbers of neurons recorded from sim_params.sli
f = open(os.path.join(datapath, 'sim_params.sli'), 'r')
for line in f:
if 't_sim' in line:
T = float(line.split()[1])
if '/record_fraction_neurons_spikes' in line:
record_frac = line.split()[1]
f.close()
f = open(os.path.join(datapath, 'sim_params.sli'), 'r')
for line in f:
if record_frac == 'true':
if 'frac_rec_spikes' in line:
frac_rec = float(line.split()[1])
else:
if 'n_rec_spikes' in line:
n_rec = int(line.split()[1])
f.close()
T_start = 200. # starting point of analysis (to avoid transients)
# load GIDs
gidfile = open(os.path.join(datapath, 'population_GIDs.dat'), 'r')
gids = []
for l in gidfile:
a = l.split()
gids.append([int(a[0]), int(a[1])])
print('Global IDs:')
print(gids)
print()
# number of populations
num_pops = len(gids)
print('Number of populations:')
print(num_pops)
print()
# first GID in each population
raw_first_gids = [gids[i][0] for i in np.arange(len(gids))]
# population sizes
pop_sizes = [gids[i][1] - gids[i][0] + 1 for i in np.arange(len(gids))]
# numbers of neurons for which spikes were recorded
if record_frac == 'true':
rec_sizes = [int(pop_sizes[i] * frac_rec)
for i in xrange(len(pop_sizes))]
else:
rec_sizes = [n_rec] * len(pop_sizes)
# first GID of each population once device GIDs are dropped
first_gids = [int(1 + np.sum(pop_sizes[:i]))
for i in np.arange(len(pop_sizes))]
# last GID of each population once device GIDs are dropped
last_gids = [int(np.sum(pop_sizes[:i + 1]))
for i in np.arange(len(pop_sizes))]
# convert lists to a nicer format, i.e. [[2/3e, 2/3i], []....]
Pop_sizes = [pop_sizes[i:i + 2]
for i in xrange(0, len(pop_sizes), 2)]
print('Population sizes:')
print(Pop_sizes)
print()
Raw_first_gids = [raw_first_gids[i:i + 2] for i in
xrange(0, len(raw_first_gids), 2)]
First_gids = [first_gids[i:i + 2] for i in xrange(0, len(first_gids), 2)]
Last_gids = [last_gids[i:i + 2] for i in xrange(0, len(last_gids), 2)]
# total number of neurons in the simulation
num_neurons = last_gids[len(last_gids) - 1]
print('Total number of neurons:')
print(num_neurons)
print()
# load spikes from gdf files, correct GIDs and merge them in population files,
# and store spike trains
# will contain neuron id resolved spike trains
neuron_spikes = [[] for i in np.arange(num_neurons + 1)]
# container for population-resolved spike data
spike_data = [[[], []], [[], []], [[], []], [[], []], [[], []], [[], []],
[[], []], [[], []]]
counter = 0
for layer in ['0', '1', '2', '3']:
for population in ['0', '1']:
output = os.path.join(datapath,
'population_spikes-{}-{}.gdf'.format(layer,
population))
file_pattern = os.path.join(datapath,
'spikes_{}_{}*'.format(layer, population))
files = glob.glob(file_pattern)
print('Merge ' + str(
len(files)) + ' spike files from L' + layer + 'P' + population)
if files:
merged_file = open(output, 'w')
for f in files:
data = open(f, 'r')
for l in data:
a = l.split()
a[0] = int(a[0])
a[1] = float(a[1])
raw_first_gid = Raw_first_gids[int(layer)][int(population)]
first_gid = First_gids[int(layer)][int(population)]
a[0] = a[0] - raw_first_gid + first_gid
if (a[1] > T_start): # discard data in the start-up phase
spike_data[counter][0].append(num_neurons - a[0])
spike_data[counter][1].append(a[1] - T_start)
neuron_spikes[a[0]].append(a[1] - T_start)
converted_line = str(a[0]) + '\t' + str(a[1]) + '\n'
merged_file.write(converted_line)
data.close()
merged_file.close()
counter += 1
clrs = ['0', '0.5', '0', '0.5', '0', '0.5', '0', '0.5']
plt.ion()
# raster plot
plt.figure(1)
counter = 1
for j in np.arange(num_pops):
for i in np.arange(first_gids[j], first_gids[j] + rec_sizes[j]):
plt.plot(neuron_spikes[i],
np.ones_like(neuron_spikes[i]) + sum(rec_sizes) - counter,
'k o', ms=1, mfc=clrs[j], mec=clrs[j])
counter += 1
plt.xlim(0, T - T_start)
plt.ylim(0, sum(rec_sizes))
plt.xlabel(r'time (ms)')
plt.ylabel(r'neuron id')
plt.savefig(os.path.join(datapath, 'rasterplot.png'))
# firing rates
rates = []
temp = 0
for i in np.arange(num_pops):
for j in np.arange(first_gids[i], last_gids[i]):
temp += len(neuron_spikes[j])
rates.append(temp / (rec_sizes[i] * (T - T_start)) * 1e3)
temp = 0
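# i.e. rate_i = (summed spike count of population i after T_start)
#               / (rec_sizes[i] * (T - T_start) ms) * 1e3  -> spikes/s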
print()
print('Firing rates:')
print(rates)
plt.figure(2)
ticks = np.arange(num_pops)
plt.bar(ticks, rates, width=0.9, color='k')
xticklabels = ['L2/3e', 'L2/3i', 'L4e', 'L4i', 'L5e', 'L5i', 'L6e', 'L6i']
plt.setp(plt.gca(), xticks=ticks + 0.5, xticklabels=xticklabels)
plt.xlabel(r'subpopulation')
plt.ylabel(r'firing rate (spikes/s)')
plt.savefig(os.path.join(datapath, 'firing_rates.png'))
plt.show()
| gpl-2.0 |
LiaoPan/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
evansosenko/aps-spin-lifetime-plots | plots/plot_fits_large_lifetime.py | 1 | 1435 | import matplotlib.pyplot
import os
from fit import Fit
from plot import Plot
class Plot(Plot):
def add_ylabel(self):
y = self.fit.meta['dependent']
text = r'$\Delta R_{\text{NL}}$' + ' $(\si{' + y['siunitx'] + r'})$'
super().add_ylabel(text)
def main():
figure = Plot.new_figure(figsize=(5,10))
data_path = lambda x: os.path.join('json', 'fig_4d_difference_' + x + '_lifetime.json')
fits = [ Fit(data_path(fig)) for fig in ['large', 'larger'] ]
for fit in fits:
fit.maps['value_transforms']['τ'] = lambda x: '%.2E' % round(x, 2)
fit.meta['contact_type'] = 'transparent'
plots = []
for idx, fit in enumerate(fits):
n, m = 2, 1
options = {}
if idx != 0: options['sharex'] = plots[idx - 1].plt
plot = Plot(fit, figure.add_subplot(n, m, (m * idx + 1), **options))
plot.id = 'd.' + str(idx + 1)
plots.append(plot)
for plot in plots:
plot.plot_data()
plot.plot_fit()
plot.add_ylabel()
plot.add_parameter_overlay()
plots[-1].add_xlabel()
matplotlib.pyplot.setp(plots[0].plt.get_xticklabels(), visible=False)
figure.savefig(os.path.join('build', 'plot_fits_large_lifetime.eps'), transparent=True)
plots[0].fit.save_info('build/plot_fits_large_lifetime_info.tex', 'plotFitsLargeLifetimeInfo')
Plot.close_figure(figure)
if __name__ == '__main__':
main()
| mit |
calebfoss/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
      **kwargs: assignments of the form key=value where key is a string
        and value is an `inflow.Series`, a parameterless `inflow.Transform`,
        or None (which removes the existing column of that name).
    Raises:
      TypeError: keys are not strings.
      TypeError: values are not `inflow.Series`, parameterless
        `inflow.Transform`s, or None.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
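# Minimal usage sketch (the `ages`/`incomes` Series and column names are
# illustrative):
#
#   df = DataFrame()
#   df.assign(age=ages, income=incomes)   # add columns keyed by string names
#   subset = df.select_columns(["age"])   # new DataFrame with only "age"
#   tensors = df.build()                  # {"age": <Tensor>, "income": <Tensor>}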
| apache-2.0 |
CVML/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
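# Each OVA boundary satisfies coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0,
# so x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1], which is exactly what
# line() below evaluates at xmin and xmax.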
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
eezee-it/addons-yelizariev | sugarcrm_migration/import_sugarcrm.py | 16 | 44410 | # -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
try:
import MySQLdb
import MySQLdb.cursors
from pandas import merge, DataFrame
except ImportError:
pass
from openerp.addons.import_framework.import_base import import_base, create_childs
from openerp.addons.import_framework.mapper import *
import subprocess
def fix_email(text):
return text.replace('\r', '<br>')
class import_sugarcrm(import_base):
TABLE_USER = 'users'
TABLE_ACCOUNT = 'accounts'
TABLE_ACCOUNT_LEAD = 'accounts_leads'
TABLE_ACCOUNT_TAG = 'accounts_tags_'
TABLE_CONTACT = 'contacts'
TABLE_CONTACT_COMPANY = 'contacts_companies_'
TABLE_CONTACT_TAG = 'contacts_tags_'
TABLE_CASE = 'cases'
TABLE_CASE_TAG = 'cases_tags_'
#TABLE_EMPLOYEE = 'Employees'
#TABLE_OPPORTUNITY = 'Opportunities'
#TABLE_LEAD = 'Leads'
#TABLE_STAGE = 'crm_stage'
#TABLE_ATTENDEE = 'calendar_attendee'
#TABLE_CALL = 'Calls'
#TABLE_MEETING = 'Meetings'
#TABLE_TASK = 'Tasks'
#TABLE_PROJECT = 'Project'
#TABLE_PROJECT_TASK = 'ProjectTask'
#TABLE_BUG = 'Bugs'
TABLE_NOTE = 'Notes'
TABLE_NOTE_INTERNAL = 'notes_internal'
TABLE_EMAIL = 'emails'
#TABLE_COMPAIGN = 'Campaigns'
#TABLE_DOCUMENT = 'Documents'
#TABLE_HISTORY_ATTACHMNET = 'history_attachment'
def initialize(self):
self.db = MySQLdb.connect(host=self.context.get('db_host'),
port=int(self.context.get('db_port')),
user=self.context.get('db_user'),
passwd=self.context.get('db_passwd'),
db=self.context.get('db_name'),
charset='utf8',
cursorclass=MySQLdb.cursors.DictCursor
)
db_dump_fies = self.context.get('db_dump_fies')
if db_dump_fies:
cur = self.db.cursor()
for f in db_dump_fies:
_logger.info('load dump %s' % f)
fd = open(f, 'r')
subprocess.Popen(['mysql',
'-u', self.context.get('db_user'),
'-p{}'.format(self.context.get('db_passwd')),
'-h', self.context.get('db_host'),
'-P', self.context.get('db_port'),
self.context.get('db_name')], stdin=fd).wait()
cur.close()
def finalize(self):
pass
def finalize_note(self):
mail_message_obj = self.pool['mail.message']
ids = self.pool['ir.attachment'].search(self.cr, self.uid, [('res_model_tmp','=','mail.message')])
for a in self.pool['ir.attachment'].read(self.cr, self.uid, ids, ['id', 'res_id_tmp'], context=self.context):
if not a['res_id_tmp']:
continue
mail_message_obj.write(self.cr, self.uid, [a['res_id_tmp']],
{'attachment_ids':[(4, a['id'])]})
def get_data(self, table):
cur = self.db.cursor()
query = "SELECT * FROM %s" % table
#query = query + ' order by rand()' # for debug
cur.execute(query)
res = cur.fetchall()
cur.close()
return list(res)
def get_mapping(self):
res = [
self.get_mapping_user(),
self.get_mapping_account(),
self.get_mapping_contact(),
self.get_mapping_case(),
self.get_mapping_email(),
self.get_mapping_note_internal(),
self.get_mapping_note(),
]
return res
def merge_table_email(self, df, id_on='id'):
#mysql> select bean_module, count(*) from email_addr_bean_rel group by bean_module;
#+-------------+----------+
#| bean_module | count(*) |
#+-------------+----------+
#| Contacts | 1048 |
#| Leads | 31 |
#| Prospects | 20391 |
#| Users | 33 |
#+-------------+----------+
#4 rows in set (0.21 sec)
t1 = merge(df,
DataFrame(self.get_data('email_addr_bean_rel')),
how='left',
left_on=id_on,
suffixes=('', '_email_addr_bean_rel'),
right_on='bean_id')
t2 = merge(t1,
DataFrame(self.get_data('email_addresses')),
how='left',
left_on = 'email_address_id',
suffixes=('', '_email_addresses'),
right_on = 'id')
return t2
def table_user(self):
t1 = self.merge_table_email(DataFrame(self.get_data('users')))
return t1
def get_mapping_user(self):
return {
'name': self.TABLE_USER,
'table': self.table_user,
'models':[{
'model' : 'res.users',
'fields': {
'id': xml_id(self.TABLE_USER, 'id'),
'active': lambda record: not record['deleted'], # status == 'Active'
'name': concat('first_name', 'last_name'),
'login': value('user_name', fallback='last_name'),
'password' : 'user_hash',
'company_id/id': const('base.main_company'),
'alias_name': value('user_name', fallback='last_name', lower=True),
'email': 'email_address',
}
}]
}
def table_account(self):
t1 = merge(DataFrame(self.get_data('accounts')),
DataFrame(self.get_data('accounts_cstm')),
left_on='id',
right_on='id_c'
)
#t1 = t1[:100] # for debug
return t1
def get_hook_tag(self, field_name):
def f(external_values):
res = []
value = external_values.get(field_name)
value = value or ''
if not isinstance(value, basestring):
value = str(value)
for v in value.split(','):
v = do_clean_sugar(v)
if v:
res.append({field_name:v})
return res
return f
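    # Illustrative behaviour of the hook returned above (assuming
    # do_clean_sugar strips surrounding whitespace): a record such as
    #     {'status_c': 'Prospect, Pipeline'}
    # yields
    #     [{'status_c': 'Prospect'}, {'status_c': 'Pipeline'}]
    # one dict per non-empty comma-separated tag value.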
def tag(self, model, xml_id_prefix, field_name):
parent = xml_id_prefix + field_name
return {'model':model,
'hook':self.get_hook_tag(field_name),
'fields': {
'id': xml_id(parent, field_name),
'name': field_name,
'parent_id/id':const('sugarcrm_migration.'+parent),
}
}
def context_partner(self):
# see module description
return {"skip_addr_sync":True}
def get_mapping_account(self):
def partner(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%sfirst_name%s'%(prefix, suffix),
'%slast_name%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_ACCOUNT + '_%s%s'%(prefix, suffix), 'id'),
'name': concat('%sfirst_name%s'%(prefix, suffix), '%slast_name%s'%(prefix, suffix)),
'phone': '%sphone%s'%(prefix, suffix),
'mobile': '%smobile%s'%(prefix, suffix),
'fax': '%sfax%s'%(prefix, suffix),
'email': '%semail%s'%(prefix, suffix),
'parent_id/id': xml_id(self.TABLE_ACCOUNT, 'id'),
'function': '%sjob_title%s'%(prefix, suffix),
'customer': const('1'),
'supplier': const('0'),
},
}
partner_list = [
partner('finance_', ''),
partner('pa_', '_primary_c'),
partner('pa_', '_secondary_c'),
partner('', '_primary_c'),
partner('', '_secondary_c'),
partner('', '_quantenary_c'),
partner('', '_other_c'),
]
tag_list = [
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'initial_source_of_referral_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'private_sector_new_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'rtw_organisation_type_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'sales_funnel_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'shenley_holdings_company_new_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'source_of_referral_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'status_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'introduced_by_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'introduced_by_customer_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'sister_company_c'),
]
return {
'name': self.TABLE_ACCOUNT,
'table': self.table_account,
'dependencies' : [self.TABLE_USER],
'models': tag_list + [
# company
{
'model' : 'res.partner',
'context':self.context_partner,
'fields' :
{
'id': xml_id(self.TABLE_ACCOUNT, 'id'),
'name': concat('name', 'first_name_c', 'last_name_c'),
'is_company': const('1'),
'date': fixdate('date_entered'),
'active': lambda record: not record['deleted'],
'user_id/.id': user_by_login('account_manager_2_c'),
'website': first('website', 'website_c'),
'phone':'company_phone_c',
'email':first('email_address', 'email_c', lower=True),
'fax': first('phone_fax', 'fax_c', 'fax_primary_c'),
'city': 'company_city_c',
'zip': 'company_post_code_c',
#'state_id': 'company_region_c',
'street': 'company_street_c',
'street2': concat('company_street_2_c','company_street_3_c'),
'country_id/.id': country_by_name('europe_c'),
'opt_out': mapper_int('unsubscribe_c'),
'customer': const('1'),
'supplier': const('0'),
'category_id/id': tags_from_fields(self.TABLE_ACCOUNT_TAG, ['initial_source_of_referral_c', 'private_sector_new_c', 'rtw_organisation_type_c', 'sales_funnel_c', 'shenley_holdings_company_new_c', 'source_of_referral_c', 'status_c', 'introduced_by_c', 'introduced_by_customer_c', 'sister_company_c',]),
'comment': ppconcat('website_c'),
}},
# realted lead
{
'model' : 'crm.lead',
'fields': {
'id': xml_id(self.TABLE_ACCOUNT_LEAD, 'id'),
'partner_id/id': xml_id(self.TABLE_ACCOUNT, 'id'),
'name': concat('name', 'first_name_c', 'last_name_c'),
'active': lambda record: not record['deleted'],
#'user_id/id': xml_id(self.TABLE_USER, 'assigned_user_id'),
'phone':first('phone_office', 'telephone_c', 'company_phone_c'),
'email_from':first('email_address', 'email_c', lower=True),
'fax': first('phone_fax', 'fax_c', 'fax_primary_c'),
'probability': map_val('sales_funnel_c', self.map_lead_probability, 0),
'stage_id/id': map_val('status_c', self.map_lead_stage, 'crm.stage_lead1'),
'type': map_val('status_c', self.map_lead_type, 'lead'),
'section_id/id': const('sales_team.section_sales_department'),
}
}
] + partner_list # related contacts
}
map_lead_probability = {
'Lost': 0,
'Proposal Sent': 50,
'Prospect Identified': 1,
'Prospect Qualified': 20,
'Sales Won': 100,
'Scheduled': 100, #in sugarcrm: 150,
'Suspect': 0,
}
#mysql> select sales_funnel_c, count(*) from accounts_cstm group by sales_funnel_c;
#+---------------------+----------+
#| sales_funnel_c | count(*) |
#+---------------------+----------+
#| NULL | 4322 |
#| | 144 |
#| Lost | 1 |
#| Proposal Sent | 3 |
#| Prospect Identified | 5 |
#| Prospect Qualified | 20 |
#| Sales Won | 2 |
#| Scheduled | 1 |
#| Suspect | 62 |
map_lead_stage = {
'': 'crm.stage_lead7', # Lost
'Archived': 'crm.stage_lead2', # Dead
'Dorment': 'crm.stage_lead4', # Proposition
'Live Contact': 'crm.stage_lead6', # Won
'Pipeline': 'crm.stage_lead5', # Negotiation
'Prospect': 'crm.stage_lead1', # New
}
map_lead_type = {
'Dorment': 'opportunity',
'Live Contact': 'opportunity',
'Pipeline': 'opportunity',
}
#mysql> select status_c, count(*) from accounts_cstm group by status_c;
#+---------------+----------+
#| status_c | count(*) |
#+---------------+----------+
#| NULL | 210 |
#| | 655 |
#| Archived | 84 |
#| Dorment | 101 |
#| Live Contract | 73 |
#| Pipeline | 390 |
#| Prospect | 3047 |
#+---------------+----------+
def table_contact(self):
t1 = merge(DataFrame(self.get_data('contacts')),
DataFrame(self.get_data('contacts_cstm')),
left_on='id',
right_on='id_c'
)
t2 = self.merge_table_email(t1)
#t2 = t2[:10] # for debug
return t2
def get_mapping_contact(self):
tag_list = [
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'agreed_commission_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'agreed_introducer_commission_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'ambassador_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'consultant_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'consultant_type_other_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'england_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'ethnicity_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'europe_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'first_language_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'gender_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'other_languages_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'religion_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'role_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'role_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'specialism_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'status_live_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'status_live_new_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'trainer_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'training_experience_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'willing_to_travel_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'skill_set_c'),
]
def company(field_name):
return {'model':'res.partner',
'context':self.context_partner,
'hook':self.get_hook_ignore_empty(field_name),
'fields': {
'id': xml_id(self.TABLE_CONTACT_COMPANY, field_name),
'name': field_name,
'is_company': const('1'),
'customer': const('0'),
'supplier': const('1'),
}
}
return {
'name': self.TABLE_CONTACT,
'table': self.table_contact,
'dependencies' : [self.TABLE_USER],
'models':tag_list + [company('company_name_c')] + [{
'model' : 'res.partner',
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CONTACT, 'id'),
'name': concat('title', 'first_name', 'last_name'),
'parent_id/id': xml_id(self.TABLE_CONTACT_COMPANY, 'company_name_c'),
'create_date': 'date_entered',
'write_date': 'date_modified',
'active': lambda record: not record['deleted'],
#'user_id/id': xml_id(self.TABLE_USER, 'assigned_user_id'),
'city': 'city_c',
'street': 'company_street_c',
'street2': concat('company_street_2_c','company_street_3_c'),
'zip': 'company_post_code_c',
'phone':first('company_phone_c', 'home_phone_c', 'phone_home', 'phone_work', 'phone_other', 'home_telephone_c', 'business_telephone_c'),
'mobile':first('phone_mobile', 'personal_mobile_phone_c'),
'email':first('email_c', 'email_address', 'personal_email_c', 'business_email_c', 'other_email_c', 'email_2_c'),
'website': first('website', 'website_c'),
'fax': first('phone_fax', 'company_fax_c'),
'customer': const('0'),
'supplier': const('1'),
'category_id/id': tags_from_fields(self.TABLE_CONTACT_TAG, ['agreed_commission_c', 'agreed_introducer_commission_c', 'ambassador_c', 'consultant_type_c', 'consultant_type_other_c', 'england_c', 'ethnicity_c', 'europe_c', 'first_language_c', 'gender_c', 'other_languages_c', 'religion_c', 'role_c', 'role_type_c', 'skill_set_c', 'specialism_c', 'status_live_c', 'status_live_new_c', 'trainer_type_c', 'training_experience_c', 'willing_to_travel_c', ]),
'comment': ppconcat(
'description',
'phone_home',
'phone_mobile',
'phone_work',
'phone_other',
'phone_fax',
'personal_email_c',
'business_email_c',
'other_email_c',
'home_telephone_c',
'business_telephone_c',
'personal_mobile_phone_c',
'personal_telephone_c',
'home_phone_c',
'mobile_phone_c',
'other_phone_c',
'email_c',
'email_2_c',
'company_phone_c',
'company_mobile_phone_c',
'company_fax_c',
'company_phone_other_c',
'company_email_c',
'prg_email_issued_c',
'email_address_permanent_c',
'prg_email_c',
'cjsm_email_address_c',
)
}
}]
}
def table_case(self):
t1 = merge(DataFrame(self.get_data('cases')),
DataFrame(self.get_data('cases_cstm')),
left_on='id',
right_on='id_c'
)
#t1 = t1[:10] # for debug
return t1
case_priority_mapping = {
'P1': '0',
'P2': '1',
'P3': '2'
}
case_state_mapping = {
'Awaiting Payment':'awaiting_payment',
'Cancelled':'cancelled',
'Completed':'close',
'Deferred':'pending',
'Live':'open',
'Lost':'lost',
'Pipeline':'pipeline_reactive',
'Pipeline - Proactive':'pipeline_proactive',
'Provisional':'draft',
'To be Invoiced':'to_be_invoiced',
}
def field_estimated_close_date_c(self, external_values):
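        # The source dates are read but not used yet; an empty string is
        # returned for now, so the target date field stays blank.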
estimated_close_date_c = external_values.get('estimated_close_date_c')
date = external_values.get('end_date_c')
return ''
def finalize_case(self):
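        # After the import, copy the user stored temporarily on each analytic
        # account (user_id_tmp) onto the user_id of the project created for it.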
ids = self.pool['account.analytic.account'].search(self.cr, self.uid, [('user_id_tmp', '!=', False)])
for r in self.pool['account.analytic.account'].read(self.cr, self.uid, ids, ['id', 'user_id_tmp']):
project_id = self.pool['project.project'].search(self.cr, self.uid, [('analytic_account_id','=', int(r['id']))], context=self.context)
self.pool['project.project'].write(self.cr, self.uid, project_id, {'user_id':r['user_id_tmp'][0]}, context=self.context)
def get_mapping_case(self):
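        """Build the import mapping for SugarCRM cases: cases become
        ``account.analytic.account`` contract records, their participants and
        primary/secondary contacts become partners, and the classification
        fields become contract categories (tags)."""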
#mysql> select case_status_c, count(*) from cases_cstm group by case_status_c;
#+----------------------+----------+
#| case_status_c | count(*) |
#+----------------------+----------+
#| NULL | 2 |
#| | 40 |
#| Awaiting Payment | 10 |
#| Cancelled | 182 |
#| Completed | 339 |
#| Deferred | 125 |
#| Live | 25 |
#| Lost | 419 |
#| Pipeline | 60 |
#| Pipeline - Proactive | 73 |
#| Provisional | 2 |
#| To be Invoiced | 7 |
#+----------------------+----------+
def partner_participant(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%scase_participant%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CASE + '_%s%s'%(prefix, suffix), 'id'),
'name': '%scase_participant%s'%(prefix, suffix),
'phone': '%sparticipant_phone%s'%(prefix, suffix),
'function': '%sparticipant_role%s'%(prefix, suffix),
'participate_in_contract_ids/id': xml_id(self.TABLE_CASE, 'id'),
'customer': const('0'),
'supplier': const('0'),
},
}
def partner(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%scontact%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CASE + '_%s%s'%(prefix, suffix), 'id'),
'name': '%scontact%s'%(prefix, suffix),
'phone': '%sphone%s'%(prefix, suffix),
'mobile': '%smobile%s'%(prefix, suffix),
'function': '%srole%s'%(prefix, suffix),
'customer': const('0'),
'supplier': const('0'),
},
}
partner_participant_list = [
partner_participant('', '_c'),
partner_participant('', '_2_c'),
partner_participant('', '_3_c'),
]
partner_list = [
partner('primary_', '_c'),
partner('secondary_', '_c'),
]
tag_list = [
self.tag('contract.category', self.TABLE_CASE_TAG, 'business_type_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'probability_of_closing_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'production_funnel_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'product_area_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'product_type_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'reason_lost_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'source_of_referral_c'),
]
return {
'name': self.TABLE_CASE,
'table': self.table_case,
'dependencies' : [
self.TABLE_USER,
self.TABLE_ACCOUNT,
self.TABLE_CONTACT,
#self.TABLE_LEAD
],
'models': []+
tag_list+
partner_list+
[{
'model' : 'account.analytic.account',
'context': lambda : {'active_test':False},
'finalize': self.finalize_case,
'fields': {
'id': xml_id(self.TABLE_CASE, 'id'),
'name': concat('case_number_c', 'case_number', 'name', delimiter=' * '),
'type': const('contract'),
'use_tasks': const('1'),
'user_id_tmp/.id': user_by_login('case_manager_c'),
'support_manager_id/.id': user_by_login('support_case_manager_c'),
'notetaker_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id4_c', default=None),
'proof_reader_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id2_c', default=None),
'consultant_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id_c', default=None),
'business_manager_id/.id': res_id(const(self.TABLE_CASE + '_%s%s'%('secondary_', '_c')), 'id', default=None),
'commissioning_manager_id/.id': res_id(const(self.TABLE_CASE + '_%s%s'%('primary_', '_c')), 'id', default=None),
'category_id/id': tags_from_fields(self.TABLE_CASE_TAG, ['business_type_c', 'probability_of_closing_c', 'production_funnel_c', 'product_area_c', 'product_type_c', 'reason_lost_c', 'source_of_referral_c',]),
'create_date': 'date_entered',
'state': map_val('case_status_c', self.case_state_mapping, 'draft'),
'partner_id/id': xml_id(self.TABLE_ACCOUNT, 'account_id'),
'date_start':'end_date_c',
'date':call(self.field_estimated_close_date_c),
'description': ppconcat(
'invoiced_value_of_case_c',
),
}
}] +
partner_participant_list
}
def table_filter_modules(self, t, field_name='bean_module'):
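        # Keep only the rows whose module column references one of the
        # migrated SugarCRM modules (Accounts, Cases, Contacts, Notes, Emails).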
newt = t[(t[field_name] == 'Accounts')|
(t[field_name] == 'Cases')|
(t[field_name] == 'Contacts')|
(t[field_name] == 'Notes')|
(t[field_name] == 'Emails')
]
return newt
def table_email(self):
t1 = merge(DataFrame(self.get_data('emails')),
DataFrame(self.get_data('emails_text')),
how='left',
left_on='id',
right_on='email_id'
)
t2 = merge(t1,
DataFrame(self.get_data('emails_beans')),
how='left',
left_on='id',
right_on='email_id',
suffixes = ('', '_emails_beans')
)
t3 = self.table_filter_modules(t2)
#t3 = t3[:100] # for debug
return t3
map_to_model = {
'Accounts': 'res.partner',
'Cases': 'project.project',
'Contacts': 'res.partner',
'Prospects': 'TODO',
'Emails': 'mail.message',
#'Notes': 'ir.attachment',
}
map_to_table = {
'Accounts': TABLE_ACCOUNT,
'Cases': TABLE_CASE,
'Contacts': TABLE_CONTACT,
'Prospects': 'TODO',
'Emails': TABLE_EMAIL,
#'Notes': TABLE_NOTE,
}
#mysql> select parent_type, count(*) from notes group by parent_type;
#+-------------+----------+
#| parent_type | count(*) |
#+-------------+----------+
#| NULL | 604 |
#| Accounts | 6385 |
#| Cases | 12149 |
#| Contacts | 41 |
#| Emails | 12445 |
#| Leads | 355 |
#| Meetings | 2 |
#+-------------+----------+
#7 rows in set (0.30 sec)
#
def get_mapping_email(self):
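        """Build the import mapping for SugarCRM e-mails: each e-mail becomes a
        ``mail.message`` attached to the record referenced by its bean."""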
# mysql> select bean_module, count(*) from emails_beans group by bean_module;
# +---------------+----------+
# | bean_module | count(*) |
# +---------------+----------+
# | Accounts | 182 |
# | Cases | 1746 |
# | Contacts | 493 |
# | Leads | 102 |
# | Opportunities | 1 |
# | Prospects | 16819 |
# +---------------+----------+
# 6 rows in set (0.56 sec)
return {
'name': self.TABLE_EMAIL,
'table': self.table_email,
'dependencies' : [
self.TABLE_USER,
self.TABLE_ACCOUNT,
self.TABLE_CONTACT,
self.TABLE_CASE,
#self.TABLE_LEAD,
#self.TABLE_OPPORTUNITY,
#self.TABLE_MEETING,
#self.TABLE_CALL
],
'models':[{
'model' : 'mail.message',
'hook': self.hook_email,
'fields': {
'id': xml_id(self.TABLE_EMAIL, 'id'),
'type':const('email'),
#mysql> select type, count(*) from emails group by type;
#+----------+----------+
#| type | count(*) |
#+----------+----------+
#| archived | 17119 |
#| draft | 8 |
#| inbound | 3004 |
#| out | 75 |
#+----------+----------+
#4 rows in set (0.76 sec)
'email_from': 'from_addr_name',
'reply_to': 'reply_to_addr',
#'same_thread': 'TODO',
'author_id/id': user2partner(self.TABLE_USER, 'created_by'),
#'partner_ids' #many2many
#attachment_ids' #many2many
#'parent_id': 'TODO',
'model': 'model',
'res_id': 'res_id',
#record_name
'subject':'name',
'date':'date_sent',
'message_id': 'message_id',
'body': call(lambda vals, html, txt: fix_email(html or txt or ''),
value('description_html'), value('description')),
'subtype_id/id':const('mail.mt_comment'),
'notified_partner_ids/.id': emails2partners('to_addrs'),
#'state' : const('received'),
#'email_to': 'to_addrs_names',
#'email_cc': 'cc_addrs_names',
#'email_bcc': 'bcc_addrs_names',
#'partner_id/.id': 'partner_id/.id',
#'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
}
}]
}
def table_note(self):
t = DataFrame(self.get_data('notes'))
t = self.table_filter_modules(t, 'parent_type')
t = t.dropna(subset=['filename'])
#t = t[:10] # for debug
return t
def table_note_internal(self):
t = DataFrame(self.get_data('notes'))
t = self.table_filter_modules(t, 'parent_type')
t = t[(t['parent_type'] != 'Emails')]
#t = t[:100] # for debug
return t
def get_id_model(self, external_values, field_name='parent_id', parent_field_name='parent_type'):
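        """Resolve the target database id and model for a SugarCRM reference.

        The module name in ``parent_field_name`` selects the destination
        table/model; when the target is a case, the analytic account id is
        translated to the id of the corresponding ``project.project``."""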
id = res_id(map_val(parent_field_name, self.map_to_table), field_name)
id.set_parent(self)
id = id(external_values)
model = map_val(parent_field_name, self.map_to_model)
model = model(external_values)
if model=='project.project':
id = self.pool['project.project'].search(self.cr, self.uid, [('analytic_account_id','=', int(id))], context=self.context)
if isinstance(id, list):
id=id[0]
return str(id),model
def hook_email(self, external_values):
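        # Attach the e-mail to its related record by filling res_id and model
        # from the SugarCRM bean reference.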
id,model = self.get_id_model(external_values, field_name='bean_id', parent_field_name='bean_module')
external_values['res_id']=id
external_values['model']=model
return external_values
def hook_note(self, external_values):
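        # Attach the note to its related record. Notes on Accounts that carry
        # a contact are re-attached to that Contact; notes whose parent cannot
        # be resolved are skipped (None is returned).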
parent_type = external_values.get('parent_type')
contact_id = external_values.get('contact_id')
if parent_type == 'Accounts' and contact_id:
external_values['parent_type'] = 'Contacts'
id,model = self.get_id_model(external_values, field_name='contact_id')
if id:
#print 'note Accounts fixed to Contacts'
external_values['res_id'] = id
external_values['res_model'] = model
return external_values
external_values['parent_type'] = parent_type
id,model = self.get_id_model(external_values)
if not id:
#print 'Note not found', parent_type, external_values.get('parent_id')
return None
else:
#print 'Note FOUND', parent_type, external_values.get('parent_id')
pass
external_values['res_id'] = id
external_values['res_model'] = model
return external_values
map_note_to_table = {
'Emails': TABLE_EMAIL
}
def get_mapping_note(self):
return {
'name': self.TABLE_NOTE,
'table': self.table_note,
'dependencies' : [self.TABLE_EMAIL,
self.TABLE_NOTE_INTERNAL,
],
'models':[{
'model': 'ir.attachment',
'context': lambda : {'active_test':False, 'quick_import':True},
'hook': self.hook_note,
'finalize': self.finalize_note,
'fields': {
'id': xml_id(self.TABLE_NOTE, 'id'),
'name':'filename',
'datas_fname':'filename',
'res_model': 'res_model',
'res_id': 'res_id',
'res_model_tmp': const('mail.message'),
'res_id_tmp': res_id(map_val('parent_type', self.map_note_to_table, default=self.TABLE_NOTE_INTERNAL), 'id'),
'store_fname': call(lambda external_values, id_value: 'sugarcrm_files/' + id_value,
value('id')),
'type':const('binary'),
#'description': 'description',
'description': const(''),
'create_date': 'date_entered',
'create_uid/id': xml_id(self.TABLE_USER, 'create_by'),
'company_id/id': const('base.main_company'),
}
}]
}
def get_mapping_note_internal(self):
return {
'name': self.TABLE_NOTE_INTERNAL,
'table': self.table_note_internal,
'dependencies' : [self.TABLE_EMAIL,
],
'models':[{
'model': 'mail.message',
'hook': self.hook_note,
'fields': {
'id': xml_id(self.TABLE_NOTE_INTERNAL, 'id'),
'subject':concat('name', 'filename', 'date_entered', delimiter=' * '),
'body': call(lambda vals, body: fix_email(body or ''),
value('description')),
'model': 'res_model',
'res_id': 'res_id',
'type':const('email'),
'date': 'date_entered',
'author_id/id': user2partner(self.TABLE_USER, 'created_by'),
#'subtype_id/id':const('mail.mt_comment'),
}
}]
}
def get_mapping_history_attachment(self):
# is not used
        return {
'name': self.TABLE_HISTORY_ATTACHMNET,
'model' : 'ir.attachment',
'dependencies' : [self.TABLE_USER, self.TABLE_ACCOUNT, self.TABLE_CONTACT, self.TABLE_LEAD, self.TABLE_OPPORTUNITY, self.TABLE_MEETING, self.TABLE_CALL, self.TABLE_EMAIL],
'hook' : import_history,
'models':[{
'fields': {
'name':'name',
'user_id/id': ref(self.TABLE_USER, 'created_by'),
'description': ppconcat('description', 'description_html'),
'res_id': 'res_id',
'res_model': 'model',
'partner_id/.id' : 'partner_id/.id',
'datas' : 'datas',
'datas_fname' : 'datas_fname'
}
}]
        }
    def get_mapping_bug(self):
# is not used
return {
'name': self.TABLE_BUG,
'model' : 'project.issue',
'dependencies' : [self.TABLE_USER],
'models':[{
'fields': {
'name': concat('bug_number', 'name', delimiter='-'),
'project_id/id': call(get_bug_project_id, 'sugarcrm_bugs'),
'categ_id/id': call(get_category, 'project.issue', value('type')),
'description': ppconcat('description', 'source', 'resolution', 'work_log', 'found_in_release', 'release_name', 'fixed_in_release_name', 'fixed_in_release'),
'priority': get_project_issue_priority,
'state': map_val('status', project_issue_state),
'assigned_to/id' : ref(self.TABLE_USER, 'assigned_user_id'),
}
}]
}
def get_mapping_project(self):
# is not used
return {
'name': self.TABLE_PROJECT,
'model' : 'project.project',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_ACCOUNT, self.TABLE_USER],
'hook' : import_project,
'models':[{
'fields': {
'name': 'name',
'date_start': 'estimated_start_date',
'date': 'estimated_end_date',
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/.id': 'partner_id/.id',
'contact_id/.id': 'contact_id/.id',
'state': map_val('status', project_state)
}
}]
}
def get_mapping_project_task(self):
# is not used
return {
'name': self.TABLE_PROJECT_TASK,
'model' : 'project.task',
'dependencies' : [self.TABLE_USER, self.TABLE_PROJECT],
'models':[{
'fields': {
'name': 'name',
'date_start': 'date_start',
'date_end': 'date_finish',
'project_id/id': ref(self.TABLE_PROJECT, 'project_id'),
'planned_hours': 'estimated_effort',
'priority': get_project_task_priority,
'description': ppconcat('description','milestone_flag', 'project_task_id', 'task_number', 'percent_complete'),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': 'partner_id/id',
'contact_id/id': 'contact_id/id',
'state': map_val('status', project_task_state)
}
}]
}
def get_mapping_task(self):
# is not used
return {
'name': self.TABLE_TASK,
'model' : 'crm.meeting',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_ACCOUNT, self.TABLE_USER],
'hook' : import_task,
'models':[{
'fields': {
'name': 'name',
'date': 'date',
'date_deadline': 'date_deadline',
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'categ_id/id': call(get_category, 'crm.meeting', const('Tasks')),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': ref(self.TABLE_CONTACT,'contact_id'),
'state': map_val('status', task_state)
}
}]
}
def get_mapping_call(self):
# is not used
return {
'name': self.TABLE_CALL,
'model' : 'crm.phonecall',
'dependencies' : [self.TABLE_ACCOUNT, self.TABLE_CONTACT, self.TABLE_OPPORTUNITY, self.TABLE_LEAD],
'models':[{
'fields': {
'name': 'name',
'date': 'date_start',
'duration': call(get_float_time, value('duration_hours'), value('duration_minutes')),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': related_ref(self.TABLE_CONTACT),
'categ_id/id': call(get_category, 'crm.phonecall', value('direction')),
'opportunity_id/id': related_ref(self.TABLE_OPPORTUNITY),
'description': ppconcat('description'),
'state': map_val('status', call_state)
}
}]
}
def get_mapping_meeting(self):
# is not used
return {
'name': self.TABLE_MEETING,
'model' : 'crm.meeting',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_OPPORTUNITY, self.TABLE_LEAD, self.TABLE_TASK],
'hook': import_meeting,
'models':[{
'fields': {
'name': 'name',
'date': 'date_start',
'duration': call(get_float_time, value('duration_hours'), value('duration_minutes')),
'location': 'location',
'attendee_ids/id':'attendee_ids/id',
'alarm_id/id': call(get_alarm_id, value('reminder_time')),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': related_ref(self.TABLE_CONTACT),
'state': map_val('status', meeting_state)
}
}]
}
def get_mapping_opportunity(self):
# is not used
return {
'name': self.TABLE_OPPORTUNITY,
'model' : 'crm.lead',
'dependencies' : [self.TABLE_USER, self.TABLE_ACCOUNT, self.TABLE_CONTACT,self.TABLE_COMPAIGN],
'hook' : import_opp,
'models':[{
'fields': {
'name': 'name',
'probability': 'probability',
'partner_id/id': refbyname(self.TABLE_ACCOUNT, 'account_name', 'res.partner'),
'title_action': 'next_step',
'partner_address_id/id': 'partner_address_id/id',
'planned_revenue': 'amount',
'date_deadline': 'date_closed',
'user_id/id' : ref(self.TABLE_USER, 'assigned_user_id'),
'stage_id/id' : get_opportunity_status,
'type' : const('opportunity'),
'categ_id/id': call(get_category, 'crm.lead', value('opportunity_type')),
'email_from': 'email_from',
'state': map_val('status', opp_state),
'description' : 'description',
}
}]
}
def get_mapping_compaign(self):
# is not used
return {
'name': self.TABLE_COMPAIGN,
'model' : 'crm.case.resource.type',
'models':[{
'fields': {
'name': 'name',
}
}]
}
def get_mapping_employee(self):
# is not used
return {
'name': self.TABLE_EMPLOYEE,
'model' : 'hr.employee',
'dependencies' : [self.TABLE_USER],
'models':[{
'fields': {
'resource_id/id': get_ressource,
'name': concat('first_name', 'last_name'),
'work_phone': 'phone_work',
'mobile_phone': 'phone_mobile',
'user_id/id': ref(self.TABLE_USER, 'id'),
'address_home_id/id': get_user_address,
'notes': ppconcat('messenger_type', 'messenger_id', 'description'),
'job_id/id': get_job_id,
'work_email' : 'email1',
'coach_id/id_parent' : 'reports_to_id',
}
}]
}
| lgpl-3.0 |
great-expectations/great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_value_counts.py | 1 | 6144 | from typing import Any, Dict, Tuple
import pandas as pd
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.expectations.metrics.column_aggregate_metric import (
ColumnMetricProvider,
)
from great_expectations.expectations.metrics.import_manager import F, sa
from great_expectations.expectations.metrics.metric_provider import metric_value
class ColumnValueCounts(ColumnMetricProvider):
metric_name = "column.value_counts"
value_keys = ("sort", "collate")
default_kwarg_values = {"sort": "value", "collate": None}
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
sort = metric_value_kwargs.get("sort", cls.default_kwarg_values["sort"])
collate = metric_value_kwargs.get(
"collate", cls.default_kwarg_values["collate"]
)
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'")
if collate is not None:
raise ValueError("collate parameter is not supported in PandasDataset")
df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN
)
column = accessor_domain_kwargs["column"]
counts = df[column].value_counts()
if sort == "value":
try:
counts.sort_index(inplace=True)
except TypeError:
                # Having values of multiple types in an object dtype column (e.g., strings and floats)
# raises a TypeError when the sorting method performs comparisons.
if df[column].dtype == object:
counts.index = counts.index.astype(str)
counts.sort_index(inplace=True)
        elif sort == "count":
counts.sort_values(inplace=True)
counts.name = "count"
counts.index.name = "value"
return counts
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
sort = metric_value_kwargs.get("sort", cls.default_kwarg_values["sort"])
collate = metric_value_kwargs.get(
"collate", cls.default_kwarg_values["collate"]
)
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'")
selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN
)
column = accessor_domain_kwargs["column"]
query = (
sa.select(
[
sa.column(column).label("value"),
sa.func.count(sa.column(column)).label("count"),
]
)
.where(sa.column(column) != None)
.group_by(sa.column(column))
)
if sort == "value":
# NOTE: depending on the way the underlying database collates columns,
# ordering can vary. postgresql collate "C" matches default sort
# for python and most other systems, but is not universally supported,
# so we use the default sort for the system, unless specifically overridden
if collate is not None:
query = query.order_by(sa.column(column).collate(collate))
else:
query = query.order_by(sa.column(column))
elif sort == "count":
query = query.order_by(sa.column("count").desc())
results = execution_engine.engine.execute(
query.select_from(selectable)
).fetchall()
series = pd.Series(
[row[1] for row in results],
index=pd.Index(data=[row[0] for row in results], name="value"),
name="count",
)
return series
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
sort = metric_value_kwargs.get("sort", cls.default_kwarg_values["sort"])
collate = metric_value_kwargs.get(
"collate", cls.default_kwarg_values["collate"]
)
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'")
if collate is not None:
raise ValueError("collate parameter is not supported in SparkDFDataset")
df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN
)
column = accessor_domain_kwargs["column"]
value_counts = (
df.select(column).where(F.col(column).isNotNull()).groupBy(column).count()
)
if sort == "value":
value_counts = value_counts.orderBy(column)
elif sort == "count":
value_counts = value_counts.orderBy(F.desc("count"))
value_counts = value_counts.collect()
series = pd.Series(
[row["count"] for row in value_counts],
index=pd.Index(data=[row[column] for row in value_counts], name="value"),
name="count",
)
return series
| apache-2.0 |
nav13n/Data-Science-45min-Intros | choosing-k-in-kmeans/3d-example.py | 25 | 2925 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
"""
This script is designed to run inline (%run 3d-example.py) in
the corresponding IPython notebook. It generates a 3d scatter
plot from scikit-learn-generated blobs, with the number of
samples and clusters controlled by the command-line arguments below.
"""
import argparse
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.datasets import make_blobs
import seaborn as sns
from gap_stats import gap_statistics
from gap_stats import plot_gap_statistics
def make_example_plot(args):
"""
Create artificial data (blobs) and color them according to the
appropriate blob center.
"""
# read args
samples = args.samples
clusters = args.clusters
# create some data
X, y = make_blobs(n_samples=samples,
centers=clusters,
n_features=3,
# increase variance for illustration
cluster_std=1.5,
# fix random_state if you believe in determinism
#random_state=42
)
# seaborn display settings
sns.set(style='whitegrid', palette=sns.color_palette("Set2", clusters))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(clusters):
# for each center, add data to the figure w/ appropriate label
ax.plot(X[y==i,0],
X[y==i,1],
X[y==i,2],
'o',
alpha=0.6,
label='cluster {}'.format(i)
)
ax.set_title('{} labeled clusters (ground truth)'.format(clusters))
ax.legend(loc='upper left')
# seaborn settings - no, really set these things this time, please
sns.set(style='whitegrid', palette=sns.color_palette("Set2", clusters))
#plt.show()
# potentially return the data for later use
data = None
if args.gap:
data = (X, y)
return data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s","--samples"
, dest="samples"
, type=int
, default=100
)
parser.add_argument("-c","--clusters"
, dest="clusters"
, type=int
, default=5
)
parser.add_argument("-g","--gap"
, dest="gap"
, type=bool
, default=False
)
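    # NOTE: argparse's type=bool treats any non-empty string as True, so
    # "-g False" still enables the gap-statistic run; omit the flag (or pass
    # an empty string) to keep it disabled.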
args = parser.parse_args()
data = make_example_plot(args)
if args.gap:
# i just really prefer the dark theme
sns.set(style='darkgrid', palette='deep')
# unpack
X, y = data
# run the gap statistic algorithm
gaps, errs, difs = gap_statistics(X, ks=range(1, args.clusters+5))
# plot (intended for %matplotlib inline)
plot_gap_statistics(gaps, errs, difs)
| unlicense |
mne-tools/mne-python | tutorials/stats-sensor-space/20_erp_stats.py | 10 | 5443 | """
===========================================================================
Visualising statistical significance thresholds on EEG data
===========================================================================
MNE-Python provides a range of tools for statistical hypothesis testing
and the visualisation of the results. Here, we show a few options for
exploratory and confirmatory tests - e.g., targeted t-tests, cluster-based
permutation approaches (here with Threshold-Free Cluster Enhancement);
and how to visualise the results.
The underlying data comes from :footcite:`DufauEtAl2015`; we contrast long vs.
short words. TFCE is described in :footcite:`SmithNichols2009`.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind
import mne
from mne.channels import find_ch_adjacency, make_1020_channel_selections
from mne.stats import spatio_temporal_cluster_test
np.random.seed(0)
# Load the data
path = mne.datasets.kiloword.data_path() + '/kword_metadata-epo.fif'
epochs = mne.read_epochs(path)
name = "NumberOfLetters"
# Split up the data by the median length in letters via the attached metadata
median_value = str(epochs.metadata[name].median())
long_words = epochs[name + " > " + median_value]
short_words = epochs[name + " < " + median_value]
#############################################################################
# If we have a specific point in space and time we wish to test, it can be
# convenient to convert the data into a Pandas DataFrame. In this case,
# the :class:`mne.Epochs` object has a convenient
# :meth:`mne.Epochs.to_data_frame` method, which returns a DataFrame.
# This DataFrame can then be queried for specific time windows and sensors.
# The extracted data can be submitted to standard statistical tests. Here,
# we conduct t-tests on the difference between long and short words.
time_windows = ((.2, .25), (.35, .45))
elecs = ["Fz", "Cz", "Pz"]
index = ['condition', 'epoch', 'time']
# display the EEG data in Pandas format (first 5 rows)
print(epochs.to_data_frame(index=index)[elecs].head())
report = "{elec}, time: {tmin}-{tmax} s; t({df})={t_val:.3f}, p={p:.3f}"
print("\nTargeted statistical test results:")
for (tmin, tmax) in time_windows:
long_df = long_words.copy().crop(tmin, tmax).to_data_frame(index=index)
short_df = short_words.copy().crop(tmin, tmax).to_data_frame(index=index)
for elec in elecs:
# extract data
A = long_df[elec].groupby("condition").mean()
B = short_df[elec].groupby("condition").mean()
# conduct t test
t, p = ttest_ind(A, B)
# display results
format_dict = dict(elec=elec, tmin=tmin, tmax=tmax,
df=len(epochs.events) - 2, t_val=t, p=p)
print(report.format(**format_dict))
##############################################################################
# Absent specific hypotheses, we can also conduct an exploratory
# mass-univariate analysis at all sensors and time points. This requires
# correcting for multiple tests.
# MNE offers various methods for this; amongst them, cluster-based permutation
# methods allow deriving power from the spatio-temporal correlation structure
# of the data. Here, we use TFCE.
# Calculate adjacency matrix between sensors from their locations
adjacency, _ = find_ch_adjacency(epochs.info, "eeg")
# Extract data: transpose because the cluster test requires channels to be last
# In this case, inference is done over items. In the same manner, we could
# also conduct the test over, e.g., subjects.
X = [long_words.get_data().transpose(0, 2, 1),
short_words.get_data().transpose(0, 2, 1)]
tfce = dict(start=.2, step=.2)
# Calculate statistical thresholds
t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
X, tfce, adjacency=adjacency,
n_permutations=100) # a more standard number would be 1000+
significant_points = cluster_pv.reshape(t_obs.shape).T < .05
print(str(significant_points.sum()) + " points selected by TFCE ...")
##############################################################################
# The results of these mass univariate analyses can be visualised by plotting
# :class:`mne.Evoked` objects as images (via :class:`mne.Evoked.plot_image`)
# and masking points for significance.
# Here, we group channels by Regions of Interest to facilitate localising
# effects on the head.
# We need an evoked object to plot the image to be masked
evoked = mne.combine_evoked([long_words.average(), short_words.average()],
weights=[1, -1]) # calculate difference wave
time_unit = dict(time_unit="s")
evoked.plot_joint(title="Long vs. short words", ts_args=time_unit,
topomap_args=time_unit) # show difference wave
# Create ROIs by checking channel labels
selections = make_1020_channel_selections(evoked.info, midline="12z")
# Visualize the results
fig, axes = plt.subplots(nrows=3, figsize=(8, 8))
axes = {sel: ax for sel, ax in zip(selections, axes.ravel())}
evoked.plot_image(axes=axes, group_by=selections, colorbar=False, show=False,
mask=significant_points, show_names="all", titles=None,
**time_unit)
plt.colorbar(axes["Left"].images[-1], ax=list(axes.values()), shrink=.3,
label="µV")
plt.show()
###############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
momiah/cvariants_opencv | ensembleInputs.py | 1 | 4269 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 16:32:55 2015
@author: mdmiah
"""
import matplotlib as mpl
mpl.use('Agg') # Needed to work on server
import numpy as np
import random
import sys
import modelInputs
# ---------------------------------- ----------------------------------
def getRowsForImages(u, v, labels, brisks, colors):
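    """Build feature rows, labels and metadata for every pairing of the four
    per-image feature rows of image u with those of image v."""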
X = []
y = []
meta = []
for i in xrange(u*4, (u*4)+4):
for j in xrange(v*4, (v*4)+4):
Xrow, yrow, metarow = modelInputs.getRowForCombination(i, j, labels, brisks, colors)
X.append(Xrow)
y.append(yrow)
meta.append(metarow)
return X, y, meta
def sampleVariant(labels, brisks, colors, start=0, end=None):
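    """Sample a pair of images sharing the same label (a colour-variant pair),
    requiring the minimum number of BRISK features on both, and return the
    rows for all their feature combinations."""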
if end is None:
end = colors.shape[0]-1
# Choose an image at random
u = random.randint(start, end)
label1 = labels[u][1]
# Make sure minimum number of BRISK features exist
i = u*4 + np.arange(4)
brisk1 = brisks[i,1:]
# If ANY of them have brisk features of lower than the threshold, then skip
if np.sum( np.sum(brisk1,axis=1)<modelInputs.minBriskFeatures ):
return sampleVariant(labels, brisks, colors, start, end)
# Match with a color variant
for m in xrange(20): # Search nearby for a color variant
v = random.randint(u-50, u+50)
v = start if v<start else end if v>end else v
if u==v: # Don't match with itself
continue
label2 = labels[v][1]
# Make sure minimum number of BRISK features exist
j = v*4 + np.arange(4)
brisk2 = brisks[j,1:]
if np.sum( np.sum(brisk2,axis=1)<modelInputs.minBriskFeatures ):
continue
if label1==label2:
return getRowsForImages(u, v, labels, brisks, colors)
# If the randomly chosen image has no color variant,
# sample again
return sampleVariant(labels, brisks, colors)
def sampleNonVariant(labels, brisks, colors, start=0, end=None):
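    """Sample a pair of images with different labels (a non-variant pair),
    requiring the minimum number of BRISK features on both, and return the
    rows for all their feature combinations."""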
if end is None:
end = colors.shape[0]-1
# Choose an image at random
u = random.randint(start, end)
label1 = labels[u][1]
# Make sure minimum number of BRISK features exist
i = u*4 + np.arange(4)
brisk1 = brisks[i,1:]
# If ANY of them have brisk features of lower than the threshold, then skip
if np.sum( np.sum(brisk1,axis=1)<modelInputs.minBriskFeatures ):
return sampleNonVariant(labels, brisks, colors, start, end)
# Match random non variants
label2 = label1
while label1==label2:
v = random.randint(start, end)
# Make sure minimum number of BRISK features exist
j = v*4 + np.arange(4)
brisk2 = brisks[j,1:]
if np.sum( np.sum(brisk2,axis=1)<modelInputs.minBriskFeatures ):
continue
label2 = labels[v][1]
return getRowsForImages(u, v, labels, brisks, colors)
def save(no_of_pairs = 10000):
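    """Generate no_of_pairs balanced variant/non-variant samples and append
    their features, labels and metadata to the CSV files under Cache/."""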
labels, brisks, colors = modelInputs.loadHists()
test_start_n = no_of_pairs * modelInputs.train_fraction # Fraction of dataset used in training
test_start_u = np.int((colors.shape[0]-1) * modelInputs.train_fraction) # Fraction of images used in training
with open("Cache/X1.csv", "w") as X_fh, open("Cache/y1.csv", "w") as y_fh, open("Cache/Xmeta1.csv", "w") as meta_fh:
for n in xrange(no_of_pairs):
if n<test_start_n:
start = 0
end = test_start_u-1
else:
start = test_start_u
end = None
X, y, meta = sampleVariant(labels, brisks, colors, start, end)
X2, y2, meta2 = sampleNonVariant(labels, brisks, colors, start, end)
X.extend(X2)
y.extend(y2)
meta.extend(meta2)
np.savetxt(X_fh, X, delimiter=",", fmt="%f")
np.savetxt(y_fh, y, delimiter=",", fmt="%d")
np.savetxt(meta_fh, meta, delimiter=",", fmt="%d")
if (n+1)%1000==0:
percentage_completion = 100.0*np.float(n+1)/no_of_pairs
sys.stdout.write(str(n+1)+" of "+str(no_of_pairs)+" done ("+str(percentage_completion)+"%)\r")
sys.stdout.flush()
print ""
| gpl-2.0 |
jostep/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 31 | 60315 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertBinaryPredictions(3, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predicted_classes, [1, 0, 0])
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predicted_proba, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
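    # Illustrative example of the metric above: the strided slice keeps
    # column 1 (the probability of class 1) for every row, so for
    # probabilities [[.2, .8], [.9, .1]] and labels [[1], [0]] the metric is
    # 0.8 * 1 + 0.1 * 0 = 0.8.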
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
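    # Rough sizing (illustrative, assuming float32 embedding weights): the
    # embedding variable is about 2e7 buckets * 1 dim * 4 bytes ~= 80 MB,
    # large enough to be sharded across the two fake parameter servers
    # configured below.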
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
      # distributed jobs, such as "/job:ps", which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
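    # Quick check on the 0.25 figure (illustrative only, assuming the
    # regression head's squared-error loss): the constant prediction that
    # minimizes mean((y - p)^2) over labels [1, 0, 0, 0] is the label mean
    # 0.25, where the loss equals the label variance
    # (0.75**2 + 3 * 0.25**2) / 4 = 0.1875.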
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
      # distributed jobs, such as "/job:ps", which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
    est = dnn.DNNRegressor(
        feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
scls19fr/blaze | blaze/compute/tests/test_sql_compute.py | 6 | 56473 | from __future__ import absolute_import, division, print_function
import pytest
sa = pytest.importorskip('sqlalchemy')
import itertools
import re
from distutils.version import LooseVersion
import datashape
from odo import into, resource, discover
from pandas import DataFrame
from toolz import unique
from blaze.compute.sql import compute, select, lower_column, compute_up
from blaze.expr import (
symbol, transform, summary, by, sin, join,
floor, cos, merge, nunique, mean, sum, count, exp
)
from blaze.compatibility import xfail
from blaze.utils import tmpfile, example
def computefull(t, s):
return select(compute(t, s))
names = ('tbl%d' % i for i in itertools.count())
@pytest.fixture(scope='module')
def data():
# make the engine
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
# name table
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
name.create()
# city table
city = sa.Table('city', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
city.create()
s = symbol('s', discover(engine))
return {'engine': engine, 'metadata': metadata, 'name': name, 'city': city,
's': s}
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True))
tdate = symbol('t',
"""var * {
name: string,
amount: int,
id: int,
occurred_on: datetime
}""")
ns = sa.Table('nullaccounts', metadata,
sa.Column('name', sa.String, nullable=True),
sa.Column('amount', sa.REAL),
sa.Column('id', sa.Integer, primary_key=True),
)
sdate = sa.Table('accdate', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('occurred_on', sa.DateTime))
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
sbig = sa.Table('accountsbig', metadata,
sa.Column('name', sa.String),
sa.Column('sex', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True))
def normalize(s):
s = ' '.join(s.strip().split()).lower()
s = re.sub(r'(alias)_?\d*', r'\1', s)
return re.sub(r'__([A-Za-z_][A-Za-z_0-9]*)', r'\1', s)
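# Illustrative example of what normalize erases, given the regexes above:
#
#   >>> normalize('SELECT t.x AS alias_2\n  FROM t')
#   'select t.x as alias from t'
#
# i.e. whitespace is collapsed, case is folded and alias numbering is
# stripped, so structurally equivalent queries compare equal.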
def test_table():
result = str(computefull(t, s))
expected = """
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
""".strip()
assert normalize(result) == normalize(expected)
def test_projection():
print(compute(t[['name', 'amount']], s))
assert str(compute(t[['name', 'amount']], s)) == \
str(sa.select([s.c.name, s.c.amount]))
def test_eq():
assert str(compute(t['amount'] == 100, s, post_compute=False)) == \
str(s.c.amount == 100)
def test_eq_unicode():
assert str(compute(t['name'] == u'Alice', s, post_compute=False)) == \
str(s.c.name == u'Alice')
def test_selection():
assert str(compute(t[t['amount'] == 0], s)) == \
str(sa.select([s]).where(s.c.amount == 0))
assert str(compute(t[t['amount'] > 150], s)) == \
str(sa.select([s]).where(s.c.amount > 150))
def test_arithmetic():
assert str(compute(t['amount'] + t['id'], s)) == \
str(sa.select([s.c.amount + s.c.id]))
assert str(compute(t['amount'] + t['id'], s, post_compute=False)) == \
str(s.c.amount + s.c.id)
assert str(compute(t['amount'] * t['id'], s, post_compute=False)) == \
str(s.c.amount * s.c.id)
assert str(compute(t['amount'] * 2, s, post_compute=False)) == \
str(s.c.amount * 2)
assert str(compute(2 * t['amount'], s, post_compute=False)) == \
str(2 * s.c.amount)
assert (str(compute(~(t['amount'] > 10), s, post_compute=False)) ==
"accounts.amount <= :amount_1")
assert str(compute(t['amount'] + t['id'] * 2, s)) == \
str(sa.select([s.c.amount + s.c.id * 2]))
def test_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
expected = lhs.join(rhs, lhs.c.name == rhs.c.name)
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
result = compute(joined, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name""")
assert str(select(result)) == str(select(expected))
# Schemas match
assert list(result.c.keys()) == list(joined.fields)
# test sort on join
result = compute(joined.sort('amount'), {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
select
anon_1.name,
anon_1.amount,
anon_1.id
from (select
amounts.name as name,
amounts.amount as amount,
ids.id as id
from
amounts
join
ids
on
amounts.name = ids.name) as anon_1
order by
anon_1.amount asc""")
def test_clean_complex_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L[L.amount > 0], R, 'name')
result = compute(joined, {L: lhs, R: rhs})
expected1 = """
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name
WHERE amounts.amount > :amount_1"""
expected2 = """
SELECT alias.name, alias.amount, ids.id
FROM (SELECT amounts.name AS name, amounts.amount AS amount
FROM amounts
WHERE amounts.amount > :amount_1) AS alias
JOIN ids ON alias.name = ids.name"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_multi_column_join():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
joined = join(L, R, ['x', 'y'])
expected = lhs.join(rhs, (lhs.c.x == rhs.c.x)
& (lhs.c.y == rhs.c.y))
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
result = compute(joined, {L: lhs, R: rhs})
assert str(result) == str(expected)
assert str(select(result)) == str(select(expected))
# Schemas match
print(result.c.keys())
print(joined.fields)
assert list(result.c.keys()) == list(joined.fields)
def test_unary_op():
assert str(compute(exp(t['amount']), s, post_compute=False)) == \
str(sa.func.exp(s.c.amount))
assert str(compute(-t['amount'], s, post_compute=False)) == \
str(-s.c.amount)
@pytest.mark.parametrize('unbiased', [True, False])
def test_std(unbiased):
assert str(compute(t.amount.std(unbiased=unbiased), s, post_compute=False)) == \
str(getattr(sa.func,
'stddev_%s' % ('samp' if unbiased else 'pop'))(s.c.amount))
@pytest.mark.parametrize('unbiased', [True, False])
def test_var(unbiased):
assert str(compute(t.amount.var(unbiased=unbiased), s, post_compute=False)) == \
str(getattr(sa.func,
'var_%s' % ('samp' if unbiased else 'pop'))(s.c.amount))
def test_reductions():
assert str(compute(sum(t['amount']), s, post_compute=False)) == \
str(sa.sql.functions.sum(s.c.amount))
assert str(compute(mean(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.avg(s.c.amount))
assert str(compute(count(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.count(s.c.amount))
assert 'amount_sum' == compute(
sum(t['amount']), s, post_compute=False).name
def test_reduction_with_invalid_axis_argument():
with pytest.raises(ValueError):
compute(t.amount.mean(axis=1))
with pytest.raises(ValueError):
compute(t.count(axis=1))
with pytest.raises(ValueError):
compute(t[['amount', 'id']].count(axis=1))
def test_nelements():
rhs = str(compute(t.count(), s))
assert str(compute(t.nelements(), s)) == rhs
assert str(compute(t.nelements(axis=None), s)) == rhs
assert str(compute(t.nelements(axis=0), s)) == rhs
assert str(compute(t.nelements(axis=(0,)), s)) == rhs
@pytest.mark.xfail(raises=Exception, reason="We don't support axis=1 for"
" Record datashapes")
def test_nelements_axis_1():
assert compute(t.nelements(axis=1), s) == len(s.columns)
def test_count_on_table():
result = compute(t.count(), s)
assert normalize(str(result)) == normalize("""
SELECT count(accounts.id) as count_1
FROM accounts""")
result = compute(t[t.amount > 0].count(), s)
assert (
normalize(str(result)) == normalize("""
SELECT count(accounts.id) as count_1
FROM accounts
WHERE accounts.amount > :amount_1""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.id) as count
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
WHERE accounts.amount > :amount_1) as alias"""))
def test_distinct():
result = str(compute(t['amount'].distinct(), s, post_compute=False))
assert 'distinct' in result.lower()
assert 'amount' in result.lower()
print(result)
assert result == str(sa.distinct(s.c.amount))
def test_distinct_multiple_columns():
assert normalize(str(compute(t.distinct(), s))) == normalize("""
SELECT DISTINCT accounts.name, accounts.amount, accounts.id
FROM accounts""")
def test_nunique():
result = str(computefull(nunique(t['amount']), s))
print(result)
assert 'distinct' in result.lower()
assert 'count' in result.lower()
assert 'amount' in result.lower()
def test_nunique_table():
result = normalize(str(computefull(t.nunique(), s)))
expected = normalize("""SELECT count(alias.id) AS tbl_row_count
FROM (SELECT DISTINCT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
assert result == expected
@xfail(reason="Fails because SQLAlchemy doesn't seem to know binary reductions")
def test_binary_reductions():
assert str(compute(any(t['amount'] > 150), s)) == \
str(sa.sql.functions.any(s.c.amount > 150))
def test_by():
expr = by(t['name'], total=t['amount'].sum())
result = compute(expr, s)
expected = sa.select([s.c.name,
sa.sql.functions.sum(s.c.amount).label('total')]
).group_by(s.c.name)
assert str(result) == str(expected)
def test_by_head():
t2 = t.head(100)
expr = by(t2['name'], total=t2['amount'].sum())
result = compute(expr, s)
# s2 = select(s).limit(100)
# expected = sa.select([s2.c.name,
# sa.sql.functions.sum(s2.c.amount).label('amount_sum')]
# ).group_by(s2.c.name)
expected = """
SELECT alias.name, sum(alias.amount) as total
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS ID
FROM accounts
LIMIT :param_1) as alias
GROUP BY alias.name"""
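    # The string above is superseded by the reassignment below: the compiled
    # query applies the LIMIT after the GROUP BY rather than grouping over a
    # limited subquery.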
expected = """
SELECT accounts.name, sum(accounts.amount) as total
FROM accounts
GROUP by accounts.name
LIMIT :param_1"""
assert normalize(str(result)) == normalize(str(expected))
def test_by_two():
expr = by(tbig[['name', 'sex']], total=tbig['amount'].sum())
result = compute(expr, sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
sa.sql.functions.sum(sbig.c.amount).label('total')])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum()),
sbig)
assert normalize(str(result)) == normalize("""
SELECT accountsbig.name,
accountsbig.sex,
sum(accountsbig.id + accountsbig.amount) AS total
FROM accountsbig GROUP BY accountsbig.name, accountsbig.sex
""")
def test_by_summary_clean():
expr = by(t.name, min=t.amount.min(), max=t.amount.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.amount) AS max, min(accounts.amount) AS min
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_by_summary_single_column():
expr = by(t.name, n=t.name.count(), biggest=t.name.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.name) AS biggest, count(accounts.name) AS n
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_join_projection():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
want = join(L, R, 'name')[['amount', 'id']]
result = compute(want, {L: lhs, R: rhs})
print(result)
assert 'join' in str(result).lower()
assert result.c.keys() == ['amount', 'id']
assert 'amounts.name = ids.name' in str(result)
def test_sort():
assert str(compute(t.sort('amount'), s)) == \
str(select(s).order_by(sa.asc(s.c.amount)))
assert str(compute(t.sort('amount', ascending=False), s)) == \
str(select(s).order_by(sa.desc(s.c.amount)))
def test_multicolumn_sort():
assert str(compute(t.sort(['amount', 'id']), s)) == \
str(select(s).order_by(sa.asc(s.c.amount), sa.asc(s.c.id)))
assert str(compute(t.sort(['amount', 'id'], ascending=False), s)) == \
str(select(s).order_by(sa.desc(s.c.amount), sa.desc(s.c.id)))
def test_sort_on_distinct():
assert normalize(str(compute(t.amount.sort(), s))) == normalize("""
SELECT accounts.amount
FROM accounts
ORDER BY accounts.amount ASC""")
assert normalize(str(compute(t.amount.distinct().sort(), s))) == normalize("""
SELECT DISTINCT accounts.amount as amount
FROM accounts
ORDER BY amount ASC""")
def test_head():
assert str(compute(t.head(2), s)) == str(select(s).limit(2))
def test_label():
assert (str(compute((t['amount'] * 10).label('foo'),
s, post_compute=False)) ==
str((s.c.amount * 10).label('foo')))
def test_relabel_table():
result = compute(t.relabel(name='NAME', id='ID'), s)
expected = select([
s.c.name.label('NAME'),
s.c.amount,
s.c.id.label('ID'),
])
assert str(result) == str(expected)
def test_relabel_projection():
result = compute(
t[['name', 'id']].relabel(name='new_name', id='new_id'),
s,
)
assert normalize(str(result)) == normalize(
"""SELECT
accounts.name AS new_name,
accounts.id AS new_id
FROM accounts""",
)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_projection_of_selection():
print(compute(t[t['amount'] < 0][['name', 'amount']], s))
assert len(str(compute(t[t['amount'] < 0], s))) > \
len(str(compute(t[t['amount'] < 0][['name', 'amount']], s)))
def test_outer_join():
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
engine = resource(uri)
_left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = resource(uri, 'left', dshape=L.dshape)
into(left, _left)
_right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = resource(uri, 'right', dshape=R.dshape)
into(right, _right)
conn = engine.connect()
query = compute(join(L, R, how='inner'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='left'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='right'),
{L: left, R: right},
post_compute=False)
print(query)
result = list(map(tuple, conn.execute(query).fetchall()))
print(result)
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
# SQLAlchemy doesn't support full outer join
"""
query = compute(join(L, R, how='outer'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
"""
conn.close()
def test_summary():
expr = summary(a=t.amount.sum(), b=t.id.count())
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
def test_summary_clean():
t2 = t[t.amount > 0]
expr = summary(a=t2.amount.sum(), b=t2.id.count())
result = str(compute(expr, s))
assert normalize(result) == normalize("""
SELECT sum(accounts.amount) as a, count(accounts.id) as b
FROM accounts
WHERE accounts.amount > :amount_1""")
def test_summary_by():
expr = by(t.name, summary(a=t.amount.sum(), b=t.id.count()))
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
assert 'group by accounts.name' in result.lower()
def test_clean_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
friends = sa.Table('friends', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
tcity = symbol('city', discover(city))
tfriends = symbol('friends', discover(friends))
tname = symbol('name', discover(name))
ns = {tname: name, tfriends: friends, tcity: city}
expr = join(tfriends, tname, 'a', 'id')
assert normalize(str(compute(expr, ns))) == normalize("""
SELECT friends.a, friends.b, name.name
FROM friends JOIN name on friends.a = name.id""")
expr = join(join(tfriends, tname, 'a', 'id'), tcity, 'a', 'id')
result = compute(expr, ns)
expected1 = """
SELECT friends.a, friends.b, name.name, place.city, place.country
FROM friends
JOIN name ON friends.a = name.id
JOIN place ON friends.a = place.id
"""
expected2 = """
SELECT alias.a, alias.b, alias.name, place.city, place.country
FROM (SELECT friends.a AS a, friends.b AS b, name.name AS name
FROM friends JOIN name ON friends.a = name.id) AS alias
JOIN place ON alias.a = place.id
"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_like():
expr = t.like(name='Alice*')
assert normalize(str(compute(expr, s))) == normalize("""
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
WHERE accounts.name LIKE :name_1""")
def test_strlen():
expr = t.name.strlen()
result = str(compute(expr, s))
expected = "SELECT char_length(accounts.name) as name FROM accounts"
assert normalize(result) == normalize(expected)
def test_columnwise_on_complex_selection():
result = str(select(compute(t[t.amount > 0].amount + 1, s)))
assert normalize(result) == \
normalize("""
SELECT accounts.amount + :amount_1 AS amount
FROM accounts
WHERE accounts.amount > :amount_2
""")
def test_reductions_on_complex_selections():
assert normalize(str(select(compute(t[t.amount > 0].id.sum(), s)))) == \
normalize("""
with alias as
(select accounts.id as id
from
accounts
where
accounts.amount > :amount_1)
select sum(alias.id) as id_sum from alias""")
def test_clean_summary_by_where():
t2 = t[t.id == 1]
expr = by(t2.name, sum=t2.amount.sum(), count=t2.amount.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.amount) AS count, sum(accounts.amount) AS sum
FROM accounts
WHERE accounts.id = :id_1
GROUP BY accounts.name
""")
def test_by_on_count():
expr = by(t.name, count=t.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.id) AS count
FROM accounts
GROUP BY accounts.name
""")
def test_join_complex_clean():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
expr = join(tname[tname.id > 0], tcity, 'id')
result = compute(expr, ns)
expected1 = """
SELECT name.id, name.name, place.city, place.country
FROM name JOIN place ON name.id = place.id
WHERE name.id > :id_1"""
expected2 = """
SELECT alias.id, alias.name, place.city, place.country
FROM (SELECT name.id as id, name.name AS name
FROM name
WHERE name.id > :id_1) AS alias
JOIN place ON alias.id = place.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_projection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
expr = join(tname, tcity[tcity.city == 'NYC'], 'id')[['country', 'name']]
ns = {tname: name, tcity: city}
result = compute(expr, ns)
expected1 = """
SELECT place.country, name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1"""
expected2 = """
SELECT alias.country, name.name
FROM name
JOIN (SELECT place.id AS id, place.city AS city, place.country AS country
FROM place
WHERE place.city = :city_1) AS alias
ON name.id = alias_6.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_lower_column():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
assert lower_column(name.c.id) is name.c.id
assert lower_column(select(name).c.id) is name.c.id
j = name.join(city, name.c.id == city.c.id)
col = [c for c in j.columns if c.name == 'country'][0]
assert lower_column(col) is city.c.country
def test_selection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
j = join(tname, tcity, 'id')
expr = j[j.city == 'NYC'].name
result = compute(expr, ns)
assert normalize(str(result)) == normalize("""
SELECT name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1""")
def test_join_on_same_table():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
expr = join(t, t, 'a')
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
tab_left.a,
tab_left.b as b_left,
tab_right.b as b_right
FROM
tab AS tab_left
JOIN
tab AS tab_right
ON
tab_left.a = tab_right.a
""")
expr = join(t, t, 'a').b_left.sum()
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
with alias as
(select
tab_left.b as b_left
from
tab as tab_left
join
tab as tab_right
on
tab_left.a = tab_right.a)
select sum(alias.b_left) as b_left_sum from alias""")
expr = join(t, t, 'a')
expr = summary(total=expr.a.sum(), smallest=expr.b_right.min())
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
min(tab_right.b) as smallest,
sum(tab_left.a) as total
FROM
tab AS tab_left
JOIN
tab AS tab_right
ON
tab_left.a = tab_right.a
""")
def test_join_suffixes():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
suffixes = '_l', '_r'
expr = join(t, t, 'a', suffixes=suffixes)
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
tab{l}.a,
tab{l}.b as b{l},
tab{r}.b as b{r}
FROM
tab AS tab{l}
JOIN
tab AS tab{r}
ON
tab{l}.a = tab{r}.a
""".format(l=suffixes[0], r=suffixes[1]))
def test_field_access_on_engines(data):
s, engine = data['s'], data['engine']
result = compute_up(s.city, engine)
assert isinstance(result, sa.Table)
assert result.name == 'city'
def test_computation_directly_on_sqlalchemy_Tables(data):
name = data['name']
s = symbol('s', discover(name))
result = into(list, compute(s.id + 1, name))
assert not isinstance(result, sa.sql.Selectable)
assert list(result) == []
def test_computation_directly_on_metadata(data):
metadata = data['metadata']
name = data['name']
s = symbol('s', discover(metadata))
result = compute(s.name, {s: metadata}, post_compute=False)
assert result == name
sql_bank = sa.Table('bank', sa.MetaData(),
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
sql_cities = sa.Table('cities', sa.MetaData(),
sa.Column('name', sa.String),
sa.Column('city', sa.String))
bank = symbol('bank', discover(sql_bank))
cities = symbol('cities', discover(sql_cities))
def test_aliased_views_with_two_group_bys():
expr = by(bank.name, total=bank.amount.sum())
expr2 = by(expr.total, count=expr.name.count())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(alias.name) as count
FROM (SELECT bank.name AS name, sum(bank.amount) AS total
FROM bank
GROUP BY bank.name) as alias
GROUP BY alias.total
""")
def test_aliased_views_with_join():
joined = join(bank, cities)
expr = by(joined.city, total=joined.amount.sum())
expr2 = by(expr.total, count=expr.city.nunique())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(DISTINCT alias.city) AS count
FROM (SELECT cities.city AS city, sum(bank.amount) AS total
FROM bank
JOIN cities ON bank.name = cities.name
GROUP BY cities.city) as alias
GROUP BY alias.total
""")
def test_select_field_on_alias():
result = compute_up(t.amount, select(s).limit(10).alias('foo'))
assert normalize(str(select(result))) == normalize("""
SELECT foo.amount
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
LIMIT :param_1) as foo""")
@pytest.mark.xfail(raises=Exception,
reason="sqlalchemy.join seems to drop unnecessary tables")
def test_join_on_single_column():
expr = join(cities[['name']], bank)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == """
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name"""
expr = join(bank, cities.name)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == """
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name"""
def test_aliased_views_more():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
expr = join(by(L.x, y_total=L.y.sum()),
R)
result = compute(expr, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x """)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
result2 = compute(expr2, {L: lhs, R: rhs})
assert (
normalize(str(result2)) == normalize("""
SELECT alias_2.w, count(alias_2.x) as count, sum(alias_2.y_total) as total2
FROM (SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x) AS alias_2
GROUP BY alias_2.w""")
or
normalize(str(result2)) == normalize("""
SELECT bbb.w, count(alias.x) as count, sum(alias.y_total) as total2
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) as alias
JOIN bbb ON alias.x = bbb.x
GROUP BY bbb.w"""))
def test_aliased_views_with_computation():
engine = sa.create_engine('sqlite:///:memory:')
df_aaa = DataFrame({'x': [1, 2, 3, 2, 3],
'y': [2, 1, 2, 3, 1],
'z': [3, 3, 3, 1, 2]})
df_bbb = DataFrame({'w': [1, 2, 3, 2, 3],
'x': [2, 1, 2, 3, 1],
'y': [3, 3, 3, 1, 2]})
df_aaa.to_sql('aaa', engine)
df_bbb.to_sql('bbb', engine)
metadata = sa.MetaData(engine)
metadata.reflect()
sql_aaa = metadata.tables['aaa']
sql_bbb = metadata.tables['bbb']
L = symbol('aaa', discover(df_aaa))
R = symbol('bbb', discover(df_bbb))
expr = join(by(L.x, y_total=L.y.sum()),
R)
a = compute(expr, {L: df_aaa, R: df_bbb})
b = compute(expr, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
a = compute(expr2, {L: df_aaa, R: df_bbb})
b = compute(expr2, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr3 = by(expr.x, count=expr.y_total.count())
a = compute(expr3, {L: df_aaa, R: df_bbb})
b = compute(expr3, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr4 = join(expr2, R)
a = compute(expr4, {L: df_aaa, R: df_bbb})
b = compute(expr4, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
""" # Takes a while
expr5 = by(expr4.count, total=(expr4.x + expr4.y).sum())
a = compute(expr5, {L: df_aaa, R: df_bbb})
b = compute(expr5, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
"""
def test_distinct_count_on_projection():
expr = t[['amount']].distinct().count()
result = compute(expr, {t: s})
assert (
normalize(str(result)) == normalize("""
SELECT count(DISTINCT accounts.amount)
FROM accounts""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.amount) as count
FROM (SELECT DISTINCT accounts.amount AS amount
FROM accounts) as alias"""))
# note that id is the primary key
expr = t[['amount', 'id']].distinct().count()
result = compute(expr, {t: s})
assert normalize(str(result)) == normalize("""
SELECT count(alias.id) as count
FROM (SELECT DISTINCT accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
def test_join_count():
ds = datashape.dshape(
'{t1: var * {x: int, y: int}, t2: var * {a: int, b: int}}')
engine = resource('sqlite:///:memory:', dshape=ds)
db = symbol('db', ds)
expr = join(db.t1[db.t1.x > -1], db.t2, 'x', 'a').count()
result = compute(expr, {db: engine}, post_compute=False)
expected1 = """
SELECT count(alias.x) as count
FROM (SELECT t1.x AS x, t1.y AS y, t2.b AS b
FROM t1 JOIN t2 ON t1.x = t2.a
WHERE t1.x > ?) as alias
"""
expected2 = """
SELECT count(alias2.x) AS count
FROM (SELECT alias1.x AS x, alias1.y AS y, t2.b AS b
FROM (SELECT t1.x AS x, t1.y AS y
FROM t1
WHERE t1.x > ?) AS alias1
JOIN t2 ON alias1.x = t2.a) AS alias2"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_transform_where():
t2 = t[t.id == 1]
expr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount,
accounts.id,
abs(accounts.amount) as abs_amt,
sin(accounts.id) as sine
FROM accounts
WHERE accounts.id = :id_1
"""
assert normalize(str(result)) == normalize(expected)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_merge_where():
t2 = t[t.id == 1]
expr = merge(t2[['amount', 'name']], t2.id)
result = compute(expr, s)
expected = normalize("""SELECT
accounts.amount,
accounts.name,
accounts.id
FROM accounts
WHERE accounts.id = :id_1
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_single_column():
t2 = t[t.amount < 0]
tr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
expr = by(tr.name, avg_amt=tr.abs_amt.mean())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_multiple_columns():
t2 = t[t.amount < 0]
tr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
expr = by(tr.name, avg_amt=tr.abs_amt.mean(), sum_sine=tr.sine.sum())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) AS sum_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_different_order():
t2 = transform(t, abs_amt=abs(t.amount), sine=sin(t.id))
tr = t2[t2.amount < 0]
expr = by(tr.name,
avg_amt=tr.abs_amt.mean(),
avg_sine=tr.sine.sum() / tr.sine.count())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) / count(sin(accounts.id)) AS avg_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_projection():
t2 = transform(t, abs_amt=abs(t.amount), sine=sin(t.id))
tr = t2[t2.amount < 0]
expr = by(tr[['name', 'id']],
avg_amt=tr.abs_amt.mean(),
avg_sine=tr.sine.sum() / tr.sine.count())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
accounts.id,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) / count(sin(accounts.id)) AS avg_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name, accounts.id
""")
assert normalize(str(result)) == expected
def test_merge_compute():
data = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
ds = datashape.dshape('var * {id: int, name: string, amount: real}')
s = symbol('s', ds)
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
into(uri + '::table', data, dshape=ds)
expr = transform(s, amount10=s.amount * 10)
result = into(list, compute(expr, {s: data}))
assert result == [(1, 'Alice', 100, 1000),
(2, 'Bob', 200, 2000),
(4, 'Dennis', 400, 4000)]
def test_notnull():
result = compute(nt[nt.name.notnull()], ns)
expected = """SELECT
nullaccounts.name,
nullaccounts.amount,
nullaccounts.id
FROM nullaccounts
WHERE nullaccounts.name is not null
"""
assert normalize(str(result)) == normalize(expected)
def test_head_limit():
assert compute(t.head(5).head(10), s)._limit == 5
assert compute(t.head(10).head(5), s)._limit == 5
assert compute(t.head(10).head(10), s)._limit == 10
def test_no_extraneous_join():
ds = """ {event: var * {name: ?string,
operation: ?string,
datetime_nearest_receiver: ?datetime,
aircraft: ?string,
temperature_2m: ?float64,
temperature_5cm: ?float64,
humidity: ?float64,
windspeed: ?float64,
pressure: ?float64,
include: int64},
operation: var * {name: ?string,
runway: int64,
takeoff: bool,
datetime_nearest_close: ?string}}
"""
db = resource('sqlite:///:memory:', dshape=ds)
d = symbol('db', dshape=ds)
expr = join(d.event[d.event.include == True],
d.operation[['name', 'datetime_nearest_close']],
'operation', 'name')
result = compute(expr, db)
assert normalize(str(result)) == normalize("""
SELECT
alias.operation,
alias.name as name_left,
alias.datetime_nearest_receiver,
alias.aircraft,
alias.temperature_2m,
alias.temperature_5cm,
alias.humidity,
alias.windspeed,
alias.pressure,
alias.include,
alias.datetime_nearest_close
FROM
(SELECT
event.name AS name,
event.operation AS operation,
event.datetime_nearest_receiver AS datetime_nearest_receiver,
event.aircraft AS aircraft,
event.temperature_2m AS temperature_2m,
event.temperature_5cm AS temperature_5cm,
event.humidity AS humidity,
event.windspeed AS windspeed,
event.pressure AS pressure,
event.include AS include
FROM
event WHERE event.include = 1) AS alias1
JOIN
(SELECT
operation.name AS name,
operation.datetime_nearest_close as datetime_nearest_close
FROM operation) AS alias2
ON
alias1.operation = alias2.name
""")
def test_math():
result = compute(sin(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT sin(accounts.amount) as amount
FROM accounts""")
result = compute(floor(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount) as amount
FROM accounts""")
result = compute(t.amount // 2, s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount / :amount_1) AS amount
FROM accounts""")
def test_transform_order():
r = transform(t, sin_amount=sin(t.amount), cos_id=cos(t.id))
result = compute(r, s)
expected = """SELECT
accounts.name,
accounts.amount,
accounts.id,
cos(accounts.id) as cos_id,
sin(accounts.amount) as sin_amount
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
def test_isin():
result = t[t.name.isin(['foo', 'bar'])]
result_sql_expr = str(compute(result, s))
expected = """
SELECT
accounts.name,
accounts.amount,
accounts.id
FROM
accounts
WHERE
accounts.name
IN
(:name_1,
:name_2)
"""
assert normalize(result_sql_expr) == normalize(expected)
@pytest.mark.skipif('1.0.0' <= LooseVersion(sa.__version__) <= '1.0.1',
reason=("SQLAlchemy generates different code in 1.0.0"
" and 1.0.1"))
def test_date_grouper_repeats_not_one_point_oh():
columns = [sa.Column('amount', sa.REAL),
sa.Column('ds', sa.TIMESTAMP)]
data = sa.Table('t', sa.MetaData(), *columns)
t = symbol('t', discover(data))
expr = by(t.ds.year, avg_amt=t.amount.mean())
result = str(compute(expr, data))
# FYI spark sql isn't able to parse this correctly
expected = """SELECT
EXTRACT(year FROM t.ds) as ds_year,
AVG(t.amount) as avg_amt
FROM t
GROUP BY EXTRACT(year FROM t.ds)
"""
assert normalize(result) == normalize(expected)
@pytest.mark.skipif(LooseVersion(sa.__version__) < '1.0.0' or
LooseVersion(sa.__version__) >= '1.0.2',
reason=("SQLAlchemy generates different code in < 1.0.0 "
"and >= 1.0.2"))
def test_date_grouper_repeats():
columns = [sa.Column('amount', sa.REAL),
sa.Column('ds', sa.TIMESTAMP)]
data = sa.Table('t', sa.MetaData(), *columns)
t = symbol('t', discover(data))
expr = by(t.ds.year, avg_amt=t.amount.mean())
result = str(compute(expr, data))
# FYI spark sql isn't able to parse this correctly
expected = """SELECT
EXTRACT(year FROM t.ds) as ds_year,
AVG(t.amount) as avg_amt
FROM t
GROUP BY ds_year
"""
assert normalize(result) == normalize(expected)
def test_transform_then_project_single_column():
expr = transform(t, foo=t.id + 1)[['foo', 'id']]
result = normalize(str(compute(expr, s)))
expected = normalize("""SELECT
accounts.id + :id_1 as foo,
accounts.id
FROM accounts""")
assert result == expected
def test_transform_then_project():
proj = ['foo', 'id']
expr = transform(t, foo=t.id + 1)[proj]
result = normalize(str(compute(expr, s)))
expected = normalize("""SELECT
accounts.id + :id_1 as foo,
accounts.id
FROM accounts""")
assert result == expected
def test_reduce_does_not_compose():
expr = by(t.name, counts=t.count()).counts.max()
result = str(compute(expr, s))
expected = """WITH alias AS
(SELECT count(accounts.id) AS counts
FROM accounts GROUP BY accounts.name)
SELECT max(alias.counts) AS counts_max
FROM alias"""
assert normalize(result) == normalize(expected)
@pytest.mark.xfail(raises=NotImplementedError)
def test_normalize_reduction():
expr = by(t.name, counts=t.count())
expr = transform(expr, normed_counts=expr.counts / expr.counts.max())
result = str(compute(expr, s))
expected = """WITH alias AS
(SELECT count(accounts.id) AS counts
FROM accounts GROUP BY accounts.name)
SELECT alias.counts / max(alias.counts) AS normed_counts
FROM alias"""
assert normalize(result) == normalize(expected)
def test_do_not_erase_group_by_functions_with_datetime():
t, s = tdate, sdate
expr = by(t[t.amount < 0].occurred_on.date,
avg_amount=t[t.amount < 0].amount.mean())
result = str(compute(expr, s))
expected = """SELECT
date(accdate.occurred_on) as occurred_on_date,
avg(accdate.amount) as avg_amount
FROM
accdate
WHERE
accdate.amount < :amount_1
GROUP BY
date(accdate.occurred_on)
"""
assert normalize(result) == normalize(expected)
def test_not():
expr = t.amount[~t.name.isin(('Billy', 'Bob'))]
result = str(compute(expr, s))
expected = """SELECT
accounts.amount
FROM
accounts
WHERE
accounts.name not in (:name_1, :name_2)
"""
assert normalize(result) == normalize(expected)
def test_slice():
start, stop, step = 50, 100, 1
result = str(compute(t[start:stop], s))
# Verifies that compute is translating the query correctly
assert result == str(select(s).offset(start).limit(stop))
# Verifies the query against expected SQL query
expected = """
SELECT accounts.name, accounts.amount, accounts.id FROM accounts
LIMIT :param_1 OFFSET :param_2
"""
assert normalize(str(result)) == normalize(str(expected))
# Step size of 1 should be alright
compute(t[start:stop:step], s)
@pytest.mark.xfail(raises=ValueError)
def test_slice_step():
start, stop, step = 50, 100, 2
compute(t[start:stop:step], s)
def test_datetime_to_date():
expr = tdate.occurred_on.date
result = str(compute(expr, sdate))
expected = """SELECT
DATE(accdate.occurred_on) as occurred_on_date
FROM
accdate
"""
assert normalize(result) == normalize(expected)
def test_sort_compose():
expr = t.name[:5].sort()
result = compute(expr, s)
expected = """select
anon_1.name
from (select
accounts.name as name
from
accounts
limit :param_1
offset :param_2) as anon_1
order by
anon_1.name asc"""
assert normalize(str(result)) == normalize(expected)
assert (normalize(str(compute(t.sort('name').name[:5], s))) !=
normalize(expected))
def test_coerce():
expr = t.amount.coerce(to='int64')
expected = """SELECT
cast(accounts.amount AS BIGINT) AS amount
FROM accounts"""
result = compute(expr, s)
assert normalize(str(result)) == normalize(expected)
def test_multi_column_by_after_transform():
tbl = transform(t, new_amount=t.amount + 1, one_two=t.amount * 2)
expr = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount * :amount_1 as one_two,
avg(accounts.amount + :amount_2) as avg_amt
FROM
accounts
GROUP BY
accounts.name, accounts.amount * :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_multi_column_by_after_transform_and_filter():
tbl = t[t.name == 'Alice']
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
expr = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount * :amount_1 as one_two,
avg(accounts.amount + :amount_2) as avg_amt
FROM
accounts
WHERE
accounts.name = :name_1
GROUP BY
accounts.name, accounts.amount * :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_attribute_access_on_transform_filter():
tbl = transform(t, new_amount=t.amount + 1)
expr = tbl[tbl.name == 'Alice'].new_amount
result = compute(expr, s)
expected = """SELECT
accounts.amount + :amount_1 as new_amount
FROM
accounts
WHERE
accounts.name = :name_1
"""
assert normalize(str(result)) == normalize(expected)
def test_attribute_on_filter_transform_groupby():
tbl = t[t.name == 'Alice']
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
gb = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
expr = gb.avg_amt
result = compute(expr, s)
expected = """SELECT
avg(accounts.amount + :amount_1) as avg_amt
FROM
accounts
WHERE
accounts.name = :name_1
GROUP BY
accounts.name, accounts.amount * :amount_2
"""
assert normalize(str(result)) == normalize(expected)
def test_label_projection():
tbl = t[(t.name == 'Alice')]
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
expr = tbl[['new_amount', 'one_two']]
# column selection shouldn't affect the resulting SQL
result = compute(expr[expr.new_amount > 1].one_two, s)
result2 = compute(expr.one_two[expr.new_amount > 1], s)
expected = """SELECT
accounts.amount * :amount_1 as one_two
FROM accounts
WHERE accounts.name = :name_1 and accounts.amount + :amount_2 > :param_1
"""
assert normalize(str(result)) == normalize(expected)
assert normalize(str(result2)) == normalize(expected)
def test_baseball_nested_by():
data = resource('sqlite:///%s' % example('teams.db'))
dshape = discover(data)
d = symbol('d', dshape)
expr = by(d.teams.name,
start_year=d.teams.yearID.min()).start_year.count_values()
result = compute(expr, data, post_compute=False)
expected = """SELECT
anon_1.start_year,
anon_1.count
FROM
(SELECT
alias.start_year as start_year,
count(alias.start_year) as count
FROM
(SELECT
min(teams.yearid) as start_year
FROM teams
GROUP BY teams.name) as alias
GROUP BY alias.start_year) as anon_1 ORDER BY anon_1.count DESC
"""
assert normalize(str(result).replace('"', '')) == normalize(expected)
def test_label_on_filter():
expr = t[t.name == 'Alice'].amount.label('foo').head(2)
result = compute(expr, s)
expected = """SELECT
accounts.amount AS foo
FROM
accounts
WHERE
accounts.name = :name_1
LIMIT :param_1
"""
assert normalize(str(result)) == normalize(expected)
def test_single_field_filter():
expr = t.amount[t.amount > 0]
result = compute(expr, s)
expected = """SELECT
accounts.amount
FROM accounts
WHERE accounts.amount > :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_multiple_field_filter():
expr = t.name[t.amount > 0]
result = compute(expr, s)
expected = """SELECT
accounts.name
FROM accounts
WHERE accounts.amount > :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_distinct_on_label():
expr = t.name.label('foo').distinct()
result = compute(expr, s)
expected = """SELECT
DISTINCT accounts.name AS foo
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
| bsd-3-clause |
spallavolu/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points: remove them from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
oyamad/QuantEcon.py | docs/sphinxext/ipython_directive.py | 12 | 29458 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import ast
import cStringIO
import os
import re
import sys
import tempfile
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from pdb import set_trace
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'% ''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
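# Rough sketch of block_parser's contract (the regexes and prompt formats below
# are assumptions mirroring the defaults documented in the module docstring):
#
#     rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
#     rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
#     block_parser("In [1]: 1 + 1\nOut[1]: 2", rgxin, rgxout,
#                  'In [%d]:', 'Out[%d]:')
#     # -> [(INPUT, (None, '1 + 1', '')), (OUTPUT, '2')]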
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
self.datacontent = data
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
store_history = True
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if is_semicolon or is_suppress:
store_history = False
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s%s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress:
if len(rest.strip()):
if is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output.decode('utf-8'))
if not is_okexcept and "Traceback" in output:
sys.stdout.write(output)
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
fmtin = self.promptin
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line) # preserve empty lines in output
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with multilines
if not multiline: # not currently on a multiline
if line_stripped.endswith('\\'): # now we are
multiline = True
cont_len = len(str(lineno)) + 2
line_to_process = line.strip('\\')
output.extend([u"%s %s" % (fmtin%lineno,line)])
continue
else: # no we're still not
line_to_process = line.strip('\\')
else: # we are currently on a multiline
line_to_process += line.strip('\\')
if line_stripped.endswith('\\'): # and we still are
continuation = '.' * cont_len
output.extend([(u' %s: '+line_stripped) % continuation])
continue
# else go ahead and run this multiline then carry on
# get output of line
self.process_input_line(unicode(line_to_process.strip()),
store_history=False)
out_line = self.cout.getvalue()
self.clear_cout()
# clear current figure if plotted
if savefig:
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
# line numbers don't actually matter, they're replaced later
if not multiline:
in_line = u"%s %s" % (fmtin%lineno,line)
output.extend([in_line])
else:
output.extend([(u' %s: '+line_stripped) % continuation])
multiline = False
if len(out_line):
output.extend([out_line])
output.extend([u''])
return output
def process_pure_python2(self, content):
"""
        content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
# nuke empty lines
content = [line for line in content if len(line.strip()) > 0]
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception:
multiline = True
multiline_start = lineno
else:
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
ast.parse('\n'.join(content[multiline_start:lineno+1]))
if (lineno < len(content) - 1 and
_count_indent(content[multiline_start]) <
_count_indent(content[lineno + 1])):
continue
output.extend([continuation, u''])
multiline = False
except Exception:
pass
continue
return output
def _count_indent(x):
import re
m = re.match('(\s+)(.*)', x)
if not m:
return 0
return len(m.group(1))
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.current_content = self.content
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python2(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
# hack
# if row == '':
# continue
# lines.extend([' %s'% row.strip()])
lines.extend([' %s' % line
for line in re.split('[\n]+', row)])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
   .....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| bsd-3-clause |
tyiannak/pyTextClassification | textClassification.py | 1 | 8754 | from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import sys, os, time, numpy, glob, scipy, shutil
import argparse
import matplotlib.pyplot as plt
import matplotlib
import itertools
import operator
import datetime
from nltk import stem
from nltk.tokenize import word_tokenize
from pyAudioAnalysis import audioTrainTest
import cPickle
import random
MAX_FILES_PER_CLASS = 300
def classifierWrapper(classifier, testSample):
R = classifier.predict(testSample.reshape(1,-1))[0]
P = classifier.predict_proba(testSample.reshape(1,-1))[0]
return [R, P]
def loadModel(modelName, isRegression=False):
try:
fo = open(modelName+"MEANS", "rb")
except IOError:
print "Load Model: Didn't find file"
return
try:
MEAN = cPickle.load(fo)
STD = cPickle.load(fo)
if not isRegression:
classNames = cPickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
with open(modelName, 'rb') as fid:
Classifier = cPickle.load(fid)
if isRegression:
return(Classifier, MEAN, STD)
else:
return(Classifier, MEAN, STD, classNames)
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def parse_arguments():
parser = argparse.ArgumentParser(description="Real time audio analysis")
tasks = parser.add_subparsers(
title="subcommands", description="available tasks", dest="task", metavar="")
trainFromDirs = tasks.add_parser("trainFromDirs", help="Train text classifier from list of directories (each directory corresponds to a different text class and has a set of documents)")
trainFromDirs.add_argument("-i", "--input", required=True, help="Input directory where the the subdirectories-classes are stored")
trainFromDirs.add_argument("--method", choices=["svm", "knn", "randomforest","gradientboosting", "extratrees"], default="svm", help="Classification method")
trainFromDirs.add_argument("--methodname", required=True, help="Classifier path")
classifyFile = tasks.add_parser("classifyFile", help="Classify an unknown document stored in a folder")
classifyFile.add_argument("-i", "--input", required=True, help="Input file where the the unknown document is stored")
#classifyFile.add_argument("--method", choices=["svm", "knn", "randomforest","gradientboosting", "extratrees"], default="m1", help="Classification method")
classifyFile.add_argument("--methodname", required=True, help="Classifier folder path")
return parser.parse_args()
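# Example invocations (the directory, document and model names are placeholders,
# not files shipped with the project):
#
#     python textClassification.py trainFromDirs -i corpusDir/ --method svm --methodname svmModel
#     python textClassification.py classifyFile -i unknownDoc.txt --methodname svmModel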
def getListOfFilesInDir(dirName, pattern):
if os.path.isdir(dirName):
strFilePattern = os.path.join(dirName, pattern)
else:
strFilePattern = dirName + pattern
textFilesList = []
textFilesList.extend(glob.glob(strFilePattern))
textFilesList = sorted(textFilesList)
return textFilesList
def loadDictionaries(dictFolder):
dictFiles = getListOfFilesInDir(dictFolder, "*.dict")
porter = stem.porter.PorterStemmer()
dicts = []
for d in dictFiles:
with open(d) as f:
temp = f.readlines()
temp = [(x.lower().replace("\n","").replace("\r","")) for x in temp]
dicts.append(temp)
return dicts
def getFeaturesFromText(text, dicts):
nDicts = len(dicts)
curF = numpy.zeros((nDicts, 1))
words = word_tokenize(text.decode('utf-8'))
words = [w.lower() for w in words]
for w in words:
for i, di in enumerate(dicts):
if w in di:
curF[i] += 1
curF /= len(text)
return curF
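# Sketch of the feature extraction above (the two word lists are invented, not
# the dictionaries loaded from myDicts/):
#
#     dicts = [['good', 'great'], ['bad', 'awful']]
#     getFeaturesFromText("a great day , not awful at all", dicts)
#     # -> 2x1 vector of per-dictionary hit counts divided by the character
#     #    length of the text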
def trainTextClassifiers(directoryPath, classifierType, classifierName):
subdirectories = get_immediate_subdirectories(directoryPath)
#tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features = 10000, stop_words='english')
dicts = loadDictionaries("myDicts/")
classNames = []
Features = []
# extract features from corpus
for si, s in enumerate(subdirectories): # for each directory in training data
print "Training folder {0:d} of {1:d} ({2:s})".format(si+1, len(subdirectories), s),
files = getListOfFilesInDir(directoryPath + os.sep + s, "*") # get list of files in directory
if MAX_FILES_PER_CLASS > 0 and MAX_FILES_PER_CLASS < len(files):
files = random.sample(files, MAX_FILES_PER_CLASS)
print " - {0:d} files".format(len(files))
classNames.append(s)
for ifile, fi in enumerate(files): # for each file in current class:
with open(fi) as f:
content = f.read()
curF = getFeaturesFromText(content, dicts) # get feature vector
if ifile ==0 : # update feature matrix
Features.append(curF.T)
else:
Features[-1] = numpy.concatenate((Features[-1], curF.T), axis = 0)
# define classifier parameters
if classifierType == "svm":
classifierParams = numpy.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0])
elif classifierType == "randomforest":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
elif classifierType == "knn":
classifierParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15])
elif classifierType == "gradientboosting":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
elif classifierType == "extratrees":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
# evaluate classifier and select best param
nExp = 10
bestParam = audioTrainTest.evaluateClassifier(Features, subdirectories, nExp, classifierType, classifierParams, 0, 0.9)
# normalize features
C = len(classNames)
[featuresNorm, MEAN, STD] = audioTrainTest.normalizeFeatures(Features)
MEAN = MEAN.tolist(); STD = STD.tolist()
featuresNew = featuresNorm
# save the classifier to file
if classifierType == "svm":
Classifier = audioTrainTest.trainSVM(featuresNew, bestParam)
elif classifierType == "randomforest":
Classifier = audioTrainTest.trainRandomForest(featuresNew, bestParam)
elif classifierType == "gradientboosting":
Classifier = audioTrainTest.trainGradientBoosting(featuresNew, bestParam)
elif classifierType == "extratrees":
Classifier = audioTrainTest.trainExtraTrees(featuresNew, bestParam)
if 'Classifier' in locals():
with open(classifierName, 'wb') as fid: # save to file
cPickle.dump(Classifier, fid)
fo = open(classifierName + "MEANS", "wb")
cPickle.dump(MEAN, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(STD, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classNames, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
def classifyFile(documentPath, modelPath):
Threshold = 1.4
t1 = time.time()
Classifier, MEAN, STD, classNames = loadModel(modelPath) # load classifier
t2 = time.time()
with open(documentPath) as f: # load text fule
text = f.read()
t3 = time.time()
dicts = loadDictionaries("myDicts/") # load dicts
t4 = time.time()
F = getFeaturesFromText(text, dicts) # extract features
t5 = time.time()
F = (F.flatten()- MEAN) / STD # normalize
R, P = classifierWrapper(Classifier, F)
t6 = time.time()
'''
print t2-t1
print t3-t2
print t4-t3
print t5-t4
print t6-t5
for i in range(len(P)):
print classNames[i], P[i]
'''
meanP = 1.0 / float(len(classNames))
Results = [(y,x) for (y,x) in sorted(zip(P, classNames), reverse=True)]
for r in Results:
if r[0] > Threshold * meanP:
print r[1], r[0]
return 0
if __name__ == "__main__":
args = parse_arguments()
if args.task == "trainFromDirs":
trainTextClassifiers(args.input, args.method, args.methodname)
if args.task == "classifyFile":
classifyFile(args.input, args.methodname)
| apache-2.0 |
dingocuster/edaHelper | treeTest.py | 1 | 1918 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 15:15:03 2015
@author: d
"""
from sklearn.tree import DecisionTreeClassifier,DecisionTreeRegressor, tree, _tree, _utils,export_graphviz,export
import seaborn as sns
from bokeh.sampledata.autompg import autompg
from pprint import pprint
import pandas as pd
titanic = sns.load_dataset("titanic")
y=titanic.pop('survived')
d=DecisionTreeClassifier()
X=titanic[[u'pclass', u'age', u'sibsp', u'parch', u'fare',
u'adult_male',
u'alone']]
#d.fit(X,y)
y=autompg['mpg']
X=autompg.drop(['name','mpg'],axis=1)
r=DecisionTreeRegressor(min_samples_leaf=25)
r.fit(X,y)
from sklearn.externals.six import StringIO
with open("mpg.dot", 'w') as f:
f = export_graphviz(r, out_file=f, feature_names=X.columns)
pprint(zip(X.columns[r.tree_.feature],r.tree_.threshold,r.tree_.children_left,r.tree_.children_right,r.tree_.value))
#import os
#os.unlink('iris.dot')
#def tree_to_dict(tree)
d={}
d['feature']=X.columns[r.tree_.feature]
d['threshold']=r.tree_.threshold
d['left_children']=r.tree_.children_left
d['right_children']=r.tree_.children_right
d['value']=r.tree_.value.flatten()
d['impurity']=r.tree_.impurity
tree_df=pd.DataFrame(d)
print tree_df
#try to navigate tree
features,values,left_split=[],[],[]
features.append(tree_df.ix[0].feature)
values.append(tree_df.ix[0].value)
left_split.append(None)
class Tree(object):
def __init__(self,feature,root,leaf,parent,is_left,right_child,left_child,threshold):
self.feature=feature
self.is_root=root
self.is_leaf=leaf
self.parent=parent
self.is_left=is_left
self.right_child=right_child
self.left_child=left_child
self.threshold=threshold
#t=Tree(None,True,False,None,None,None,None,None)
# iteritems() yields (column name, Series) pairs; it cannot be indexed with [0]
for i in tree_df.iteritems():
    print i
def navigate_tree_df(df):
series=df.ix[0]
    t=Tree(series.feature,True,False,None,None,None,None,None)  # remaining constructor args assumed None (placeholders)
| bsd-3-clause |
nirdizati/nirdizati-runtime | PredictiveMethods/CaseOutcome/batch/SequenceEncoder.py | 1 | 5183 | """
Copyright (c) 2016-2017 The Nirdizati Project.
This file is part of "Nirdizati".
"Nirdizati" is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 3 of the
License, or (at your option) any later version.
"Nirdizati" is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this program.
If not, see <http://www.gnu.org/licenses/lgpl.html>.
"""
import pandas as pd
from sklearn.feature_extraction import DictVectorizer as DV
class SequenceEncoder():
def __init__(self, nr_events, event_nr_col, case_id_col, label_col, static_cols=[], dynamic_cols=[],
last_state_cols=[], cat_cols=[], oversample_fit=True, minority_label="positive", fillna=True,
random_state=None):
self.nr_events = nr_events
self.static_cols = static_cols
self.dynamic_cols = dynamic_cols
self.last_state_cols = last_state_cols
self.cat_cols = cat_cols
self.event_nr_col = event_nr_col
self.case_id_col = case_id_col
self.label_col = label_col
self.oversample_fit = oversample_fit
self.minority_label = minority_label
self.random_state = random_state
self.fillna = fillna
self.fitted_columns = None
def fit(self, X):
return self
def fit_transform(self, X):
data = self._encode(X)
if self.oversample_fit:
data = self._oversample(data)
return data
def transform(self, X):
data = self._encode(X)
return data
def _encode(self, X):
# encode static cols
if self.label_col not in self.static_cols:
self.static_cols.append(self.label_col)
if self.case_id_col not in self.static_cols:
self.static_cols.append(self.case_id_col)
data_final = X[X[self.event_nr_col] == 1][self.static_cols]
# encode dynamic cols
for i in range(1, self.nr_events + 1):
data_selected = X[X[self.event_nr_col] == i][[self.case_id_col] + self.dynamic_cols]
data_selected.columns = [self.case_id_col] + ["%s_%s" % (col, i) for col in self.dynamic_cols]
data_final = pd.merge(data_final, data_selected, on=self.case_id_col, how="right")
# encode last state cols
for col in self.last_state_cols:
data_final = pd.merge(data_final, X[X[self.event_nr_col] == self.nr_events][[self.case_id_col, col]],
on=self.case_id_col, how="right")
for idx, row in data_final.iterrows():
current_nr_events = self.nr_events - 1
while pd.isnull(data_final.loc[idx, col]) and current_nr_events > 0:
data_final.loc[idx, col] = X[(X[self.case_id_col] == row[self.case_id_col]) & (
X[self.event_nr_col] == current_nr_events)].iloc[0][col]
current_nr_events -= 1
# make categorical
dynamic_cat_cols = [col for col in self.cat_cols if col in self.dynamic_cols]
static_cat_cols = [col for col in self.cat_cols if col in self.static_cols]
        categorical_cols = ["%s_%s" % (col, i) for i in range(1, self.nr_events + 1) for col in
dynamic_cat_cols] + static_cat_cols
        cat_df = data_final[categorical_cols]
cat_dict = cat_df.T.to_dict().values()
vectorizer = DV(sparse=False)
vec_cat_dict = vectorizer.fit_transform(cat_dict)
cat_data = pd.DataFrame(vec_cat_dict, columns=vectorizer.feature_names_)
        data_final = pd.concat([data_final.drop(categorical_cols, axis=1), cat_data], axis=1)
if self.fitted_columns is not None:
missing_cols = self.fitted_columns[~self.fitted_columns.isin(data_final.columns)]
for col in missing_cols:
data_final[col] = 0
data_final = data_final[self.fitted_columns]
else:
self.fitted_columns = data_final.columns
# fill NA
if self.fillna:
for col in data_final:
dt = data_final[col].dtype
if dt == int or dt == float:
data_final[col].fillna(0, inplace=True)
else:
data_final[col].fillna("", inplace=True)
return data_final
def _oversample(self, X):
oversample_count = sum(X[self.label_col] != self.minority_label) - sum(X[self.label_col] == self.minority_label)
if oversample_count > 0 and sum(X[self.label_col] == self.minority_label) > 0:
oversampled_data = X[X[self.label_col] == self.minority_label].sample(oversample_count, replace=True,
random_state=self.random_state)
X = pd.concat([X, oversampled_data])
return X
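# Minimal usage sketch; every column name and the nr_events value below are
# invented for illustration and are not taken from the Nirdizati configuration:
#
#     encoder = SequenceEncoder(nr_events=3, event_nr_col="event_nr",
#                               case_id_col="case_id", label_col="label",
#                               static_cols=["channel"], dynamic_cols=["activity"],
#                               cat_cols=["channel", "activity"])
#     train_encoded = encoder.fit_transform(train_df)   # oversamples if enabled
#     test_encoded = encoder.transform(test_df)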
| lgpl-3.0 |
kou/arrow | python/pyarrow/tests/parquet/test_parquet_writer.py | 4 | 8362 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import pyarrow as pa
from pyarrow import fs
from pyarrow.filesystem import FileSystem, LocalFileSystem
from pyarrow.tests.parquet.common import parametrize_legacy_dataset
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import _read_table, _test_dataframe
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
except ImportError:
pd = tm = None
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_parquet_incremental_file_build(tempdir, use_legacy_dataset):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
writer.close()
buf = out.getvalue()
result = _read_table(
pa.BufferReader(buf), use_legacy_dataset=use_legacy_dataset)
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
def test_validate_schema_write_table(tempdir):
# ARROW-2926
simple_fields = [
pa.field('POS', pa.uint32()),
pa.field('desc', pa.string())
]
simple_schema = pa.schema(simple_fields)
# simple_table schema does not match simple_schema
simple_from_array = [pa.array([1]), pa.array(['bla'])]
simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])
path = tempdir / 'simple_validate_schema.parquet'
with pq.ParquetWriter(path, simple_schema,
version='2.0',
compression='snappy', flavor='spark') as w:
with pytest.raises(ValueError):
w.write_table(simple_table)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_parquet_writer_context_obj(tempdir, use_legacy_dataset):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
buf = out.getvalue()
result = _read_table(
pa.BufferReader(buf), use_legacy_dataset=use_legacy_dataset)
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_parquet_writer_context_obj_with_exception(
tempdir, use_legacy_dataset
):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
error_text = 'Artificial Error'
try:
with pq.ParquetWriter(out,
arrow_table.schema,
version='2.0') as writer:
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
if i == 5:
raise ValueError(error_text)
except Exception as e:
assert str(e) == error_text
buf = out.getvalue()
result = _read_table(
pa.BufferReader(buf), use_legacy_dataset=use_legacy_dataset)
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
@pytest.mark.parametrize("filesystem", [
None,
LocalFileSystem._get_instance(),
fs.LocalFileSystem(),
])
def test_parquet_writer_filesystem_local(tempdir, filesystem):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
path = str(tempdir / 'data.parquet')
with pq.ParquetWriter(
path, table.schema, filesystem=filesystem, version='2.0'
) as writer:
writer.write_table(table)
result = _read_table(path).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.s3
def test_parquet_writer_filesystem_s3(s3_example_fs):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
fs, uri, path = s3_example_fs
with pq.ParquetWriter(
path, table.schema, filesystem=fs, version='2.0'
) as writer:
writer.write_table(table)
result = _read_table(uri).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.s3
def test_parquet_writer_filesystem_s3_uri(s3_example_fs):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
fs, uri, path = s3_example_fs
with pq.ParquetWriter(uri, table.schema, version='2.0') as writer:
writer.write_table(table)
result = _read_table(path, filesystem=fs).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
def test_parquet_writer_filesystem_s3fs(s3_example_s3fs):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
fs, directory = s3_example_s3fs
path = directory + "/test.parquet"
with pq.ParquetWriter(
path, table.schema, filesystem=fs, version='2.0'
) as writer:
writer.write_table(table)
result = _read_table(path, filesystem=fs).to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
def test_parquet_writer_filesystem_buffer_raises():
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
filesystem = fs.LocalFileSystem()
# Should raise ValueError when filesystem is passed with file-like object
with pytest.raises(ValueError, match="specified path is file-like"):
pq.ParquetWriter(
pa.BufferOutputStream(), table.schema, filesystem=filesystem
)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_parquet_writer_with_caller_provided_filesystem(use_legacy_dataset):
out = pa.BufferOutputStream()
class CustomFS(FileSystem):
def __init__(self):
self.path = None
self.mode = None
def open(self, path, mode='rb'):
self.path = path
self.mode = mode
return out
fs = CustomFS()
fname = 'expected_fname.parquet'
df = _test_dataframe(100)
table = pa.Table.from_pandas(df, preserve_index=False)
with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \
as writer:
writer.write_table(table)
assert fs.path == fname
assert fs.mode == 'wb'
assert out.closed
buf = out.getvalue()
table_read = _read_table(
pa.BufferReader(buf), use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df_read, df)
# Should raise ValueError when filesystem is passed with file-like object
with pytest.raises(ValueError) as err_info:
pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)
expected_msg = ("filesystem passed but where is file-like, so"
" there is nothing to open with filesystem.")
assert str(err_info) == expected_msg
| apache-2.0 |
0x0all/scikit-learn | sklearn/datasets/lfw.py | 28 | 17953 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification, is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
import logging
import numpy as np
try:
import urllib.request as urllib #for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) / (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) / (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
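# Worked example: with the default slice_ used by fetch_lfw_people below,
# (slice(70, 195), slice(78, 172)), the crop is 125 x 94 pixels; with
# resize=0.5 this becomes int(0.5 * 125) = 62 by int(0.5 * 94) = 47,
# i.e. the 62 x 47 images referred to in the docstrings below.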
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray-level
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with at least
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
Parameters
----------
data_home: optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person: int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
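# Note on the reshape above: _load_imgs returns a single array holding
# 2 * n_pairs face images; popping the leading dimension and re-inserting
# (n_pairs, 2) regroups the images pairwise. For the 2200 development
# training pairs with the default slice_ and resize this turns a
# (4400, 62, 47) array into (2200, 2, 62, 47), i.e. the `pairs` attribute
# documented in fetch_lfw_pairs below.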
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
Parameters
----------
subset: optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home: optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
Ratio used to resize each face picture.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people.
target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.20/_downloads/3a2b9aa2deb5cb4e8da2ffe2be1bff69/plot_mixed_source_space_inverse.py | 2 | 5183 | """
===================================================================
Compute MNE inverse solution on evoked data in a mixed source space
===================================================================
Create a mixed source space and compute MNE inverse solution on an
evoked dataset.
"""
# Author: Annalisa Pascarella <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
from nilearn import plotting
import mne
from mne.minimum_norm import make_inverse_operator, apply_inverse
# Set dir
data_path = mne.datasets.sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_evoked = data_dir + '/sample_audvis-ave.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'
fname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'
###############################################################################
# Set up our source space.
# List substructures we are interested in. We select only the
# sub structures we want to include in the source space
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Get a surface-based source space, here with few source points for speed
# in this demonstration, in general you should use oct6 spacing!
src = mne.setup_source_space(subject, spacing='oct5',
add_dist=False, subjects_dir=subjects_dir)
# Now we create a mixed src space by adding the volume regions specified in the
# list labels_vol. First, read the aseg file and the source space bounds
# using the inner skull surface (here using 10mm spacing to save time,
# we recommend something smaller like 5.0 in actual analyses):
vol_src = mne.setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
volume_label=labels_vol, subjects_dir=subjects_dir,
add_interpolator=False, # just for speed, usually this should be True
verbose=True)
# Generate the mixed source space
src += vol_src
# Visualize the source space.
src.plot(subjects_dir=subjects_dir)
n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))
###############################################################################
# We could write the mixed source space with::
#
# >>> write_source_spaces(fname_mixed_src, src, overwrite=True)
#
# We can also export source positions to nift file and visualize it again:
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True)
plotting.plot_img(nii_fname, cmap='nipy_spectral')
# Compute the fwd matrix
fwd = mne.make_forward_solution(
fname_evoked, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False, n_jobs=1)
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
src_fwd = fwd['src']
n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd)))
print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n))
# Load data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname_evoked, condition=condition,
baseline=(None, 0))
noise_cov = mne.read_cov(fname_cov)
# Compute inverse solution and for each epoch
snr = 3.0 # use smaller SNR for raw data
inv_method = 'dSPM' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
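# with snr = 3.0 this gives lambda2 = 1 / 9 ~= 0.111, the regularization
# parameter passed to apply_inverse below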
# Compute inverse operator
inverse_operator = make_inverse_operator(evoked.info, fwd, noise_cov,
depth=None, fixed=False)
stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori=None)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(
subject, parc=parc, subjects_dir=subjects_dir)
###############################################################################
# Average the source estimates within each label of the cortical parcellation
# and each sub structure contained in the src space
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(
[stc], labels_parc, src, mode='mean', allow_empty=True)
# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stc.times, label_ts[0][71, :].T, 'r', label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
axes.legend()
mne.viz.tight_layout()
| bsd-3-clause |
walterreade/scikit-learn | sklearn/gaussian_process/gpc.py | 42 | 31571 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
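# NOTE: illustrative sketch only, not from the original source; it shows
# how coefficients of this kind could be re-derived from the recipe in the
# comment above. The evaluation grid and the use of scipy's lstsq/expit
# are assumptions.
#
# >>> import numpy as np
# >>> from scipy.special import erf, expit
# >>> from scipy.linalg import lstsq
# >>> x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])[:, np.newaxis]
# >>> A = (erf(x * LAMBDAS.ravel()) + 1) / 2  # shape (6, 5)
# >>> b = expit(x).ravel()  # logistic sigmoid on the grid
# >>> coefs = lstsq(A, b)[0][:, np.newaxis]  # least-squares fit, shape (5, 1)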
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_: array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_: array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed in Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
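# The lines below implement the closed form
#   int (erf(lambda * z) + 1) / 2 * N(z | f_star, var_f_star) dz
#     = (erf(lambda * f_star / sqrt(1 + 2 * lambda**2 * var_f_star)) + 1) / 2
# for each lambda in LAMBDAS: with alpha = 1 / (2 * var_f_star) the
# `integrals` expression reduces to erf(.) / 2, and the remaining "+ 1) / 2"
# part is restored by the 0.5 * COEFS.sum() term in the last line.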
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
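# Each pass of the loop below evaluates the Newton update
#   f  <-  (K^-1 + W)^-1 (W f + (y - pi))
# in the numerically stable form of GPML Algorithm 3.1, i.e. via the
# Cholesky factor L of B = I + W^(1/2) K W^(1/2) instead of inverting
# K directly.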
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class: string, default: "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values (binary or multi-class)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
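# NOTE: illustrative usage sketch only, not part of the original module;
# the toy data below is made up.
#
# >>> import numpy as np
# >>> from sklearn.gaussian_process import GaussianProcessClassifier
# >>> from sklearn.gaussian_process.kernels import RBF
# >>> X = np.array([[0.0], [1.0], [2.0], [3.0]])
# >>> y = np.array([0, 0, 1, 1])
# >>> gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0)).fit(X, y)
# >>> gpc.predict_proba(np.array([[1.5]])).shape
# (1, 2)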
| bsd-3-clause |
vsporeddy/bigbang | bigbang/process.py | 1 | 4872 | from bigbang.parse import get_date
import pandas as pd
import datetime
import networkx as nx
import numpy as np
import email.utils
import re
import Levenshtein
from functools import partial
def consolidate_senders_activity(activity_df, to_consolidate):
"""
takes a DataFrame in the format returned by activity
takes a list of tuples of format ('from 1', 'from 2') to consolidate
returns the consolidated DataFrame (a copy, not in place)
"""
df = activity_df.copy(deep=True)
for consolidate in to_consolidate:
column_a, column_b = consolidate
if column_a in df.columns and column_b in df.columns:
df[column_a] = df[column_a] + df[column_b]
df.drop(column_b, inplace=True, axis=1) # delete the second column
return df
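# NOTE: illustrative sketch only, not from the original source; the sender
# addresses below are made up.
#
# >>> import pandas as pd
# >>> act = pd.DataFrame({"[email protected]": [1, 0], "a at example.com": [0, 2]})
# >>> merged = consolidate_senders_activity(
# ...     act, [("[email protected]", "a at example.com")])
# >>> list(merged.columns), list(merged["[email protected]"])
# (['[email protected]'], [1, 2])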
def matricize(series, func):
"""
create a matrix by applying func to pairwise combos of elements in a Series
returns a square matrix as a DataFrame
should return a symmetric matrix if func(a,b) == func(b,a)
should return the identity matrix if func == '=='
"""
matrix = pd.DataFrame(columns=series, index=series)
for index, element in enumerate(series):
for second_index, second_element in enumerate(series):
matrix.iloc[index, second_index] = func(element, second_element)
return matrix
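# NOTE: illustrative sketch only, not from the original source; with
# equality as `func` the result is the identity pattern promised in the
# docstring above.
#
# >>> import pandas as pd
# >>> m = matricize(pd.Series(["a", "b"]), lambda x, y: int(x == y))
# >>> m.values.tolist()
# [[1, 0], [0, 1]]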
def minimum_but_not_self(column, dataframe):
minimum = 100
for index, value in dataframe[column].iteritems():
if index == column:
continue
if value < minimum:
minimum = value
return minimum
def sorted_matrix(from_dataframe,limit=None,sort_key=None):
if limit is None:
limit = len(from_dataframe.columns)
distancedf = matricize(from_dataframe.columns[:limit], from_header_distance)
# specify that the values in the matrix are integers
df = distancedf.astype(int)
if sort_key is not None:
#sort_for_this_df = partial(minimum_but_not_self, dataframe=df)
new_columns = sorted(df.columns, key=sort_key)
new_df = df.reindex(index=new_columns, columns=new_columns)
return df
def resolve_sender_entities(arx):
"""
Given an Archive, return a list of lists, each containing
message senders ('From' fields) that have been groups to be
probably the same entity.
"""
act = arx.get_activity()
# senders ordered by descending total activity
senders = act.sum(0).order(ascending=False)
senders_act = senders.index
# senders in lexical order
senders_lex = act.columns.order()
senders_lex_dict = dict([(p[1],p[0]) for p in enumerate(senders_lex)])
n = len(senders)
# binary matrix of similarity between entries
sim = np.zeros((n,n))
# find similarity
for i in range(n):
name = senders_act[i]
i = senders_lex_dict[name]
# checking only lexically close entries and
# in proportion to total activity
# is a performance hack.
for j in range(i - (n - i + 1) / 2, i + (n - i + 1) / 2):
d = from_header_distance(senders_lex[i],senders_lex[j])
sim[i,j] = (d == 0)
# An entity is a connected component of the resulting graph
G = nx.Graph(sim)
entities_list = [[senders_lex[j] for j in x] for x in nx.connected_components(G)]
# give each entity a label based on its most active 'member'
entities_dict = {}
for e in entities_list:
# TODO: tighten up this labeling function
label = sorted(e,key=lambda n:senders[n],reverse=True)[0]
entities_dict[label] = e
return entities_dict
ren = "([\w\+\.\-]+(\@| at )[\w+\.\-]*) \((.*)\)"
def from_header_distance(a, b):
"""
A distance measure specifically for the 'From' header of emails.
Normalizes based on common differences in client handling of email,
then computes Levenshtein distance between components of the field.
"""
# this translate table is one way you are supposed to
# delete characters from a unicode string
stop_characters = unicode('"<>')
stop_characters_map = dict((ord(char), None) for char in stop_characters)
a_normal = unicode(a).lower().translate(stop_characters_map).replace(' at ','@')
b_normal = unicode(b).lower().translate(stop_characters_map).replace(' at ','@')
ag = re.match(ren,a_normal)
bg = re.match(ren,b_normal)
dist = float("inf")
if ag is None or bg is None:
print "malformed pair:"
print ag
print bg
dist = Levenshtein.distance(a_normal, b_normal)
else:
dist = Levenshtein.distance(ag.groups()[0],bg.groups()[0]) \
+ Levenshtein.distance(ag.groups()[1],bg.groups()[1])
if len(ag.groups()[2]) > 5 and len(bg.groups()[2]) > 5:
dist = min(dist,Levenshtein.distance(ag.groups()[2],bg.groups()[2]))
return dist
| gpl-2.0 |
Vimos/scikit-learn | sklearn/externals/joblib/__init__.py | 54 | 5087 | """Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
========================= ================================================
**User documentation:** http://pythonhosted.org/joblib
**Download packages:** http://pypi.python.org/pypi/joblib#downloads
**Source code:** http://github.com/joblib/joblib
**Report issues:** http://github.com/joblib/joblib/issues
========================= ================================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over an
over, for instance when prototyping computational-heavy jobs (as in
scientific development), but hand-crafted solution to alleviate this
issue is error-prone and often leads to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
   has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.11'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
| bsd-3-clause |
rubikloud/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
nathbo/GO_DILab | src/explore/.openai_examples/reinforcement_q_learning.py | 1 | 9049 | # Reinforcement Learning (DQN) tutorial
# =====================================
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from copy import deepcopy
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as T
env = gym.make('CartPole-v0').unwrapped
# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
from IPython import display
plt.ion()
# if gpu is to be used
use_cuda = torch.cuda.is_available()
use_cuda = False
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
######################################################################
# Replay Memory
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
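# A minimal sketch of how ReplayMemory is meant to be used (the tensors below
# are placeholders; the training loop further down does the same thing):
#
#     memory = ReplayMemory(1000)
#     memory.push(state, action, next_state, reward)   # stores one Transition
#     transitions = memory.sample(32)                  # list of 32 Transitions
#     batch = Transition(*zip(*transitions))           # transpose into batched fields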
######################################################################
# DQN algorithm
class DQN(nn.Module):
def __init__(self):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
self.head = nn.Linear(448, 2)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
######################################################################
# Input extraction
resize = T.Compose([T.ToPILImage(),
T.Scale(40, interpolation=Image.CUBIC),
T.ToTensor()])
# This is based on the code from gym.
screen_width = 600
def get_cart_location():
world_width = env.x_threshold * 2
scale = screen_width / world_width
return int(env.state[0] * scale + screen_width / 2.0) # MIDDLE OF CART
def get_screen():
screen = env.render(mode='rgb_array').transpose(
(2, 0, 1)) # transpose into torch order (CHW)
# Strip off the top and bottom of the screen
screen = screen[:, 160:320]
view_width = 320
cart_location = get_cart_location()
if cart_location < view_width // 2:
slice_range = slice(view_width)
elif cart_location > (screen_width - view_width // 2):
slice_range = slice(-view_width, None)
else:
slice_range = slice(cart_location - view_width // 2,
cart_location + view_width // 2)
# Strip off the edges, so that we have a square image centered on a cart
screen = screen[:, :, slice_range]
    # Convert to float, rescale, and convert to a torch tensor
# (this doesn't require a copy)
screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
screen = torch.from_numpy(screen)
# Resize, and add a batch dimension (BCHW)
return resize(screen).unsqueeze(0).type(Tensor)
env.reset()
plt.figure()
plt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(),
interpolation='none')
plt.title('Example extracted screen')
plt.show()
######################################################################
# Training
BATCH_SIZE = 128
GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
model = DQN()
if use_cuda:
model.cuda()
optimizer = optim.RMSprop(model.parameters())
memory = ReplayMemory(10000)
steps_done = 0
def select_action(state):
global steps_done
sample = random.random()
eps_threshold = EPS_END + (EPS_START - EPS_END) * \
math.exp(-1. * steps_done / EPS_DECAY)
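    # eps_threshold decays exponentially from EPS_START towards EPS_END with a
    # time constant of EPS_DECAY steps (epsilon-greedy exploration schedule).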
steps_done += 1
if sample > eps_threshold:
return model(
Variable(state, volatile=True).type(FloatTensor)).data.max(1)[1].view(1, 1)
else:
return LongTensor([[random.randrange(2)]])
episode_durations = []
def plot_durations():
plt.figure(2)
plt.clf()
durations_t = torch.FloatTensor(episode_durations)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
# Take 100 episode averages and plot them too
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.pause(0.001) # pause a bit so that plots are updated
if is_ipython:
display.clear_output(wait=True)
display.display(plt.gcf())
######################################################################
# Training loop
last_sync = 0
def optimize_model():
global last_sync
if len(memory) < BATCH_SIZE:
return
transitions = memory.sample(BATCH_SIZE)
# Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
# detailed explanation).
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
non_final_mask = ByteTensor(tuple(map(lambda s: s is not None,
batch.next_state)))
# We don't want to backprop through the expected action values and volatile
# will save us on temporarily changing the model parameters'
# requires_grad to False!
non_final_next_states = Variable(torch.cat([s for s in batch.next_state
if s is not None]),
volatile=True)
state_batch = Variable(torch.cat(batch.state))
action_batch = Variable(torch.cat(batch.action))
reward_batch = Variable(torch.cat(batch.reward))
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken
state_action_values = model(state_batch).gather(1, action_batch)
# Compute V(s_{t+1}) for all next states.
next_state_values = Variable(torch.zeros(BATCH_SIZE).type(Tensor))
next_state_values[non_final_mask] = model(non_final_next_states).max(1)[0]
# Now, we don't want to mess up the loss with a volatile flag, so let's
# clear it. After this, we'll just end up with a Variable that has
# requires_grad=False
next_state_values.volatile = False
# Compute the expected Q values
expected_state_action_values = (next_state_values * GAMMA) + reward_batch
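    # i.e. the Bellman target Q_target(s_t, a_t) = r_t + GAMMA * max_a Q(s_{t+1}, a),
    # where max_a Q(s_{t+1}, a) is taken to be 0 for terminal (final) states.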
# Compute Huber loss
loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)
# Optimize the model
optimizer.zero_grad()
loss.backward()
for param in model.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
######################################################################
#
# Below, you can find the main training loop. At the beginning we reset
# the environment and initialize the ``state`` variable. Then, we sample
# an action, execute it, observe the next screen and the reward (always
# 1), and optimize our model once. When the episode ends (our model
# fails), we restart the loop.
#
# Below, `num_episodes` is set small. You should download
# the notebook and run a lot more episodes.
num_episodes = 10
for i_episode in range(num_episodes):
# Initialize the environment and state
env.reset()
last_screen = get_screen()
current_screen = get_screen()
state = current_screen - last_screen
for t in count():
# Select and perform an action
action = select_action(state)
_, reward, done, _ = env.step(action[0, 0])
reward = Tensor([reward])
# Observe new state
last_screen = current_screen
current_screen = get_screen()
if not done:
next_state = current_screen - last_screen
else:
next_state = None
# Store the transition in memory
memory.push(state, action, next_state, reward)
# Move to the next state
state = next_state
# Perform one step of the optimization (on the target network)
optimize_model()
if done:
episode_durations.append(t + 1)
plot_durations()
break
print('Complete')
env.render(close=True)
env.close()
plt.ioff()
plt.show()
| mit |
sumitsourabh/opencog | opencog/python/spatiotemporal/temporal_events/animation.py | 34 | 4896 | from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
x_axis = xrange(13)
zeros_13 = zeros(13)
class Animation(object):
def __init__(self, event_a, event_b, event_c, plt=plt):
self.event_a = event_a
self.event_c = event_c
self.event_b_length_beginning = event_b.beginning - event_b.a
self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
self.event_b_length_total = event_b.b - event_b.ending
self.plt = plt
self.fig = plt.figure(1)
self.ax_a_b = self.fig.add_subplot(4, 1, 1)
self.ax_b_c = self.fig.add_subplot(4, 1, 2)
self.ax_a_c = self.fig.add_subplot(4, 1, 3)
self.ax_relations = self.fig.add_subplot(4, 1, 4)
self.ax_a_b.set_xlim(0, 13)
self.ax_a_b.set_ylim(0, 1)
self.ax_b_c.set_xlim(0, 13)
self.ax_b_c.set_ylim(0, 1)
self.ax_a_c.set_xlim(0, 13)
self.ax_a_c.set_ylim(0, 1)
self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
self.line_a = Line2D([], [])
self.line_b = Line2D([], [])
self.line_c = Line2D([], [])
self.ax_relations.add_line(self.line_a)
self.ax_relations.add_line(self.line_b)
self.ax_relations.add_line(self.line_c)
a = min(event_a.a, event_c.a) - self.event_b_length_total
b = max(event_a.b, event_c.b)
self.ax_relations.set_xlim(a, b + self.event_b_length_total)
self.ax_relations.set_ylim(0, 1.1)
# self.interval = TimeInterval(a, b, 150)
self.interval = TimeInterval(a, b, 2)
self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_b.xaxis.set_ticklabels('poDedOP')
self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_b_c.xaxis.set_ticklabels('poDedOP')
self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_c.xaxis.set_ticklabels('poDedOP')
self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)
def init(self):
artists = []
self.line_a.set_data(self.event_a, self.event_a.membership_function)
self.line_b.set_data([], [])
self.line_c.set_data(self.event_c, self.event_c.membership_function)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
for rect, h in zip(self.rects_a_b, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
rect.set_height(h)
artists.append(rect)
return artists
def animate(self, t):
interval = self.interval
B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
interval[t] + self.event_b_length_beginning,
interval[t] + self.event_b_length_middle)
plt.figure()
B.plot().show()
a_b = (self.event_a * B).to_list()
b_c = (B * self.event_c).to_list()
self.line_b.set_data(B, B.membership_function)
artists = []
for rect, h in zip(self.rects_a_b, a_b):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, b_c):
rect.set_height(h)
artists.append(rect)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
return artists
def show(self):
fr = len(self.interval) - 1
anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
frames=fr, interval=fr, blit=True)
self.plt.show()
if __name__ == '__main__':
anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
TemporalEventTrapezium(0, 10, 6, 9),
TemporalEventTrapezium(0.5, 11, 1, 3))
# anim.show()
| agpl-3.0 |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/path.py | 2 | 37755 | """
A module for dealing with the polylines used throughout matplotlib.
The primary class for polyline handling in matplotlib is :class:`Path`.
Almost all vector drawing makes use of Paths somewhere in the drawing
pipeline.
Whilst a :class:`Path` instance itself cannot be drawn, there exists
:class:`~matplotlib.artist.Artist` subclasses which can be used for
convenient Path visualisation - the two most frequently used of these are
:class:`~matplotlib.patches.PathPatch` and
:class:`~matplotlib.collections.PathCollection`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
from weakref import WeakValueDictionary
import numpy as np
from . import _path, rcParams
from .cbook import (_to_unmasked_float_array, simple_linear_interpolation,
maxdict)
class Path(object):
"""
:class:`Path` represents a series of possibly disconnected,
possibly closed, line and curve segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of vertex types
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
    provide three vertices as well as three codes ``CURVE4``.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not
required and ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bezier curve from the current position,
with the given control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bezier curve from the current position, with
the given control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current
polyline.
Users of Path objects should not access the vertices and codes
arrays directly. Instead, they should use :meth:`iter_segments`
or :meth:`cleaned` to get the vertex/code pairs. This is important,
since many :class:`Path` objects, as an optimization, do not store a
*codes* at all, but have a default one provided for them by
:meth:`iter_segments`.
Some behavior of Path objects can be controlled by rcParams. See
the rcParams whose keys contain 'path.'.
.. note::
The vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
# Path codes
STOP = 0 # 1 vertex
MOVETO = 1 # 1 vertex
LINETO = 2 # 1 vertex
CURVE3 = 3 # 2 vertices
CURVE4 = 4 # 3 vertices
CLOSEPOLY = 79 # 1 vertex
#: A dictionary mapping Path codes to the number of vertices that the
#: code expects.
NUM_VERTICES_FOR_CODE = {STOP: 1,
MOVETO: 1,
LINETO: 1,
CURVE3: 2,
CURVE4: 3,
CLOSEPOLY: 1}
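    # A minimal sketch of building a closed triangle with explicit codes
    # (coordinates are illustrative only):
    #
    #     verts = [(0., 0.), (1., 0.), (0.5, 1.), (0., 0.)]
    #     codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
    #     triangle = Path(verts, codes)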
code_type = np.uint8
def __init__(self, vertices, codes=None, _interpolation_steps=1,
closed=False, readonly=False):
"""
Create a new path with the given vertices and codes.
Parameters
----------
vertices : array_like
The ``(n, 2)`` float array, masked array or sequence of pairs
representing the vertices of the path.
If *vertices* contains masked values, they will be converted
to NaNs which are then handled correctly by the Agg
PathIterator and other consumers of path data, such as
:meth:`iter_segments`.
codes : {None, array_like}, optional
n-length array integers representing the codes of the path.
If not None, codes must be the same length as vertices.
If None, *vertices* will be treated as a series of line segments.
_interpolation_steps : int, optional
Used as a hint to certain projections, such as Polar, that this
path should be linearly interpolated immediately before drawing.
This attribute is primarily an implementation detail and is not
intended for public use.
closed : bool, optional
If *codes* is None and closed is True, vertices will be treated as
line segments of a closed polygon.
readonly : bool, optional
Makes the path behave in an immutable way and sets the vertices
and codes as read-only arrays.
"""
vertices = _to_unmasked_float_array(vertices)
if (vertices.ndim != 2) or (vertices.shape[1] != 2):
msg = "'vertices' must be a 2D list or array with shape Nx2"
raise ValueError(msg)
if codes is not None:
codes = np.asarray(codes, self.code_type)
if (codes.ndim != 1) or len(codes) != len(vertices):
msg = ("'codes' must be a 1D list or array with the same"
" length of 'vertices'")
raise ValueError(msg)
if len(codes) and codes[0] != self.MOVETO:
msg = ("The first element of 'code' must be equal to 'MOVETO':"
" {0}")
raise ValueError(msg.format(self.MOVETO))
elif closed:
codes = np.empty(len(vertices), dtype=self.code_type)
codes[0] = self.MOVETO
codes[1:-1] = self.LINETO
codes[-1] = self.CLOSEPOLY
self._vertices = vertices
self._codes = codes
self._interpolation_steps = _interpolation_steps
self._update_values()
if readonly:
self._vertices.flags.writeable = False
if self._codes is not None:
self._codes.flags.writeable = False
self._readonly = True
else:
self._readonly = False
@classmethod
def _fast_from_codes_and_verts(cls, verts, codes, internals=None):
"""
Creates a Path instance without the expense of calling the constructor
Parameters
----------
verts : numpy array
codes : numpy array
internals : dict or None
The attributes that the resulting path should have.
Allowed keys are ``readonly``, ``should_simplify``,
``simplify_threshold``, ``has_nonfinite`` and
``interpolation_steps``.
"""
internals = internals or {}
pth = cls.__new__(cls)
pth._vertices = _to_unmasked_float_array(verts)
pth._codes = codes
pth._readonly = internals.pop('readonly', False)
pth.should_simplify = internals.pop('should_simplify', True)
pth.simplify_threshold = (
internals.pop('simplify_threshold',
rcParams['path.simplify_threshold'])
)
pth._has_nonfinite = internals.pop('has_nonfinite', False)
pth._interpolation_steps = internals.pop('interpolation_steps', 1)
if internals:
raise ValueError('Unexpected internals provided to '
'_fast_from_codes_and_verts: '
'{0}'.format('\n *'.join(internals)))
return pth
def _update_values(self):
self._simplify_threshold = rcParams['path.simplify_threshold']
self._should_simplify = (
self._simplify_threshold > 0 and
rcParams['path.simplify'] and
len(self._vertices) >= 128 and
(self._codes is None or np.all(self._codes <= Path.LINETO))
)
self._has_nonfinite = not np.isfinite(self._vertices).all()
@property
def vertices(self):
"""
The list of vertices in the `Path` as an Nx2 numpy array.
"""
return self._vertices
@vertices.setter
def vertices(self, vertices):
if self._readonly:
raise AttributeError("Can't set vertices on a readonly Path")
self._vertices = vertices
self._update_values()
@property
def codes(self):
"""
The list of codes in the `Path` as a 1-D numpy array. Each
code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
or `CLOSEPOLY`. For codes that correspond to more than one
vertex (`CURVE3` and `CURVE4`), that code will be repeated so
that the length of `self.vertices` and `self.codes` is always
the same.
"""
return self._codes
@codes.setter
def codes(self, codes):
if self._readonly:
raise AttributeError("Can't set codes on a readonly Path")
self._codes = codes
self._update_values()
@property
def simplify_threshold(self):
"""
The fraction of a pixel difference below which vertices will
be simplified out.
"""
return self._simplify_threshold
@simplify_threshold.setter
def simplify_threshold(self, threshold):
self._simplify_threshold = threshold
@property
def has_nonfinite(self):
"""
`True` if the vertices array has nonfinite values.
"""
return self._has_nonfinite
@property
def should_simplify(self):
"""
`True` if the vertices array should be simplified.
"""
return self._should_simplify
@should_simplify.setter
def should_simplify(self, should_simplify):
self._should_simplify = should_simplify
@property
def readonly(self):
"""
`True` if the `Path` is read-only.
"""
return self._readonly
def __copy__(self):
"""
Returns a shallow copy of the `Path`, which will share the
vertices and codes with the source `Path`.
"""
import copy
return copy.copy(self)
copy = __copy__
def __deepcopy__(self, memo=None):
"""
Returns a deepcopy of the `Path`. The `Path` will not be
readonly, even if the source `Path` is.
"""
try:
codes = self.codes.copy()
except AttributeError:
codes = None
return self.__class__(
self.vertices.copy(), codes,
_interpolation_steps=self._interpolation_steps)
deepcopy = __deepcopy__
@classmethod
def make_compound_path_from_polys(cls, XY):
"""
Make a compound path object to draw a number
of polygons with equal numbers of sides XY is a (numpolys x
numsides x 2) numpy array of vertices. Return object is a
:class:`Path`
.. plot:: gallery/api/histogram_path.py
"""
# for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
# the CLOSEPOLY; the vert for the closepoly is ignored but we still
# need it to keep the codes aligned with the vertices
numpolys, numsides, two = XY.shape
if two != 2:
raise ValueError("The third dimension of 'XY' must be 2")
stride = numsides + 1
nverts = numpolys * stride
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * cls.LINETO
codes[0::stride] = cls.MOVETO
codes[numsides::stride] = cls.CLOSEPOLY
for i in range(numsides):
verts[i::stride] = XY[:, i]
return cls(verts, codes)
@classmethod
def make_compound_path(cls, *args):
"""Make a compound path from a list of Path objects."""
# Handle an empty list in args (i.e. no args).
if not args:
return Path(np.empty([0, 2], dtype=np.float32))
lengths = [len(x) for x in args]
total_length = sum(lengths)
vertices = np.vstack([x.vertices for x in args])
vertices.reshape((total_length, 2))
codes = np.empty(total_length, dtype=cls.code_type)
i = 0
for path in args:
if path.codes is None:
codes[i] = cls.MOVETO
codes[i + 1:i + len(path.vertices)] = cls.LINETO
else:
codes[i:i + len(path.codes)] = path.codes
i += len(path.vertices)
return cls(vertices, codes)
def __repr__(self):
return "Path(%r, %r)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
def iter_segments(self, transform=None, remove_nans=True, clip=None,
snap=False, stroke_width=1.0, simplify=None,
curves=True, sketch=None):
"""
Iterates over all of the curve segments in the path. Each
iteration returns a 2-tuple (*vertices*, *code*), where
*vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
one of the :class:`Path` codes.
Additionally, this method can provide a number of standard
cleanups and conversions to the path.
Parameters
----------
transform : None or :class:`~matplotlib.transforms.Transform` instance
If not None, the given affine transformation will
be applied to the path.
remove_nans : {False, True}, optional
If True, will remove all NaNs from the path and
insert MOVETO commands to skip over them.
clip : None or sequence, optional
If not None, must be a four-tuple (x1, y1, x2, y2)
defining a rectangle in which to clip the path.
snap : None or bool, optional
If None, auto-snap to pixels, to reduce
fuzziness of rectilinear lines. If True, force snapping, and
if False, don't snap.
stroke_width : float, optional
The width of the stroke being drawn. Needed
as a hint for the snapping algorithm.
simplify : None or bool, optional
If True, perform simplification, to remove
vertices that do not affect the appearance of the path. If
False, perform no simplification. If None, use the
should_simplify member variable. See also the rcParams
path.simplify and path.simplify_threshold.
curves : {True, False}, optional
If True, curve segments will be returned as curve
segments. If False, all curves will be converted to line
segments.
sketch : None or sequence, optional
If not None, must be a 3-tuple of the form
(scale, length, randomness), representing the sketch
parameters.
"""
if not len(self):
return
cleaned = self.cleaned(transform=transform,
remove_nans=remove_nans, clip=clip,
snap=snap, stroke_width=stroke_width,
simplify=simplify, curves=curves,
sketch=sketch)
vertices = cleaned.vertices
codes = cleaned.codes
len_vertices = vertices.shape[0]
# Cache these object lookups for performance in the loop.
NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
STOP = self.STOP
i = 0
while i < len_vertices:
code = codes[i]
if code == STOP:
return
else:
num_vertices = NUM_VERTICES_FOR_CODE[code]
curr_vertices = vertices[i:i+num_vertices].flatten()
yield curr_vertices, code
i += num_vertices
def cleaned(self, transform=None, remove_nans=False, clip=None,
quantize=False, simplify=False, curves=False,
stroke_width=1.0, snap=False, sketch=None):
"""
Cleans up the path according to the parameters returning a new
Path instance.
.. seealso::
See :meth:`iter_segments` for details of the keyword arguments.
Returns
-------
Path instance with cleaned up vertices and codes.
"""
vertices, codes = _path.cleanup_path(self, transform,
remove_nans, clip,
snap, stroke_width,
simplify, curves, sketch)
internals = {'should_simplify': self.should_simplify and not simplify,
'has_nonfinite': self.has_nonfinite and not remove_nans,
'simplify_threshold': self.simplify_threshold,
'interpolation_steps': self._interpolation_steps}
return Path._fast_from_codes_and_verts(vertices, codes, internals)
def transformed(self, transform):
"""
Return a transformed copy of the path.
.. seealso::
:class:`matplotlib.transforms.TransformedPath`
A specialized path class that will cache the
transformed result and automatically update when the
transform changes.
"""
return Path(transform.transform(self.vertices), self.codes,
self._interpolation_steps)
def contains_point(self, point, transform=None, radius=0.0):
"""
Returns whether the (closed) path contains the given point.
If *transform* is not ``None``, the path will be transformed before
performing the test.
*radius* allows the path to be made slightly larger or smaller.
"""
if transform is not None:
transform = transform.frozen()
# `point_in_path` does not handle nonlinear transforms, so we
# transform the path ourselves. If `transform` is affine, letting
# `point_in_path` handle the transform avoids allocating an extra
# buffer.
if transform and not transform.is_affine:
self = transform.transform_path(self)
transform = None
return _path.point_in_path(point[0], point[1], radius, self, transform)
def contains_points(self, points, transform=None, radius=0.0):
"""
Returns a bool array which is ``True`` if the (closed) path contains
the corresponding point.
If *transform* is not ``None``, the path will be transformed before
performing the test.
*radius* allows the path to be made slightly larger or smaller.
"""
if transform is not None:
transform = transform.frozen()
result = _path.points_in_path(points, radius, self, transform)
return result.astype('bool')
def contains_path(self, path, transform=None):
"""
Returns whether this (closed) path completely contains the given path.
If *transform* is not ``None``, the path will be transformed before
performing the test.
"""
if transform is not None:
transform = transform.frozen()
return _path.path_in_path(self, None, path, transform)
def get_extents(self, transform=None):
"""
Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
path.
Unlike computing the extents on the *vertices* alone, this
algorithm will take into account the curves and deal with
control points appropriately.
"""
from .transforms import Bbox
path = self
if transform is not None:
transform = transform.frozen()
if not transform.is_affine:
path = self.transformed(transform)
transform = None
return Bbox(_path.get_path_extents(path, transform))
def intersects_path(self, other, filled=True):
"""
Returns *True* if this path intersects another given path.
*filled*, when True, treats the paths as if they were filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
return _path.path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
"""
Returns *True* if this path intersects a given
:class:`~matplotlib.transforms.Bbox`.
*filled*, when True, treats the path as if it was filled.
That is, if the path completely encloses the bounding box,
:meth:`intersects_bbox` will return True.
The bounding box is always considered filled.
"""
return _path.path_intersects_rectangle(self,
bbox.x0, bbox.y0, bbox.x1, bbox.y1, filled)
def interpolated(self, steps):
"""
Returns a new path resampled to length N x steps. Does not
currently handle interpolating curves.
"""
if steps == 1:
return self
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
def to_polygons(self, transform=None, width=0, height=0, closed_only=True):
"""
Convert this path to a list of polygons or polylines. Each
polygon/polyline is an Nx2 array of vertices. In other words,
each polygon has no ``MOVETO`` instructions or curves. This
is useful for displaying in backends that do not support
compound paths or Bezier curves, such as GDK.
If *width* and *height* are both non-zero then the lines will
be simplified so that vertices outside of (0, 0), (width,
height) will be clipped.
If *closed_only* is `True` (default), only closed polygons,
with the last point being the same as the first point, will be
returned. Any unclosed polylines in the path will be
explicitly closed. If *closed_only* is `False`, any unclosed
polygons in the path will be returned as unclosed polygons,
and the closed polygons will be returned explicitly closed by
setting the last point to the same as the first point.
"""
if len(self.vertices) == 0:
return []
if transform is not None:
transform = transform.frozen()
if self.codes is None and (width == 0 or height == 0):
vertices = self.vertices
if closed_only:
if len(vertices) < 3:
return []
elif np.any(vertices[0] != vertices[-1]):
vertices = list(vertices) + [vertices[0]]
if transform is None:
return [vertices]
else:
return [transform.transform(vertices)]
# Deal with the case where there are curves and/or multiple
# subpaths (using extension code)
return _path.convert_path_to_polygons(
self, transform, width, height, closed_only)
_unit_rectangle = None
@classmethod
def unit_rectangle(cls):
"""
Return a :class:`Path` instance of the unit rectangle
from (0, 0) to (1, 1).
"""
if cls._unit_rectangle is None:
cls._unit_rectangle = \
cls([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0],
[0.0, 0.0]],
[cls.MOVETO, cls.LINETO, cls.LINETO, cls.LINETO,
cls.CLOSEPOLY],
readonly=True)
return cls._unit_rectangle
_unit_regular_polygons = WeakValueDictionary()
@classmethod
def unit_regular_polygon(cls, numVertices):
"""
Return a :class:`Path` instance for a unit regular
polygon with the given *numVertices* and radius of 1.0,
centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_polygons.get(numVertices)
else:
path = None
if path is None:
theta = (2*np.pi/numVertices *
np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
codes = np.empty((numVertices + 1,))
codes[0] = cls.MOVETO
codes[1:-1] = cls.LINETO
codes[-1] = cls.CLOSEPOLY
path = cls(verts, codes, readonly=True)
if numVertices <= 16:
cls._unit_regular_polygons[numVertices] = path
return path
_unit_regular_stars = WeakValueDictionary()
@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
Return a :class:`Path` for a unit regular star
with the given numVertices and radius of 1.0, centered at (0,
0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
codes = np.empty((ns2 + 1,))
codes[0] = cls.MOVETO
codes[1:-1] = cls.LINETO
codes[-1] = cls.CLOSEPOLY
path = cls(verts, codes, readonly=True)
if numVertices <= 16:
cls._unit_regular_stars[(numVertices, innerCircle)] = path
return path
@classmethod
def unit_regular_asterisk(cls, numVertices):
"""
Return a :class:`Path` for a unit regular
asterisk with the given numVertices and radius of 1.0,
centered at (0, 0).
"""
return cls.unit_regular_star(numVertices, 0.0)
_unit_circle = None
@classmethod
def unit_circle(cls):
"""
Return the readonly :class:`Path` of the unit circle.
For most cases, :func:`Path.circle` will be what you want.
"""
if cls._unit_circle is None:
cls._unit_circle = cls.circle(center=(0, 0), radius=1,
readonly=True)
return cls._unit_circle
@classmethod
def circle(cls, center=(0., 0.), radius=1., readonly=False):
"""
Return a Path representing a circle of a given radius and center.
Parameters
----------
center : pair of floats
The center of the circle. Default ``(0, 0)``.
radius : float
The radius of the circle. Default is 1.
readonly : bool
Whether the created path should have the "readonly" argument
set when creating the Path instance.
Notes
-----
The circle is approximated using cubic Bezier curves. This
uses 8 splines around the circle using the approach presented
here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = SQRTHALF * MAGIC
vertices = np.array([[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[-MAGIC, 1.0],
[-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
[-SQRTHALF, SQRTHALF],
[-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
[-1.0, MAGIC],
[-1.0, 0.0],
[-1.0, -MAGIC],
[-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
[-SQRTHALF, -SQRTHALF],
[-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
[-MAGIC, -1.0],
[0.0, -1.0],
[0.0, -1.0]],
dtype=float)
codes = [cls.CURVE4] * 26
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
return Path(vertices * radius + center, codes, readonly=readonly)
_unit_circle_righthalf = None
@classmethod
def unit_circle_righthalf(cls):
"""
Return a :class:`Path` of the right half
of a unit circle. The circle is approximated using cubic Bezier
curves. This uses 4 splines around the circle using the approach
presented here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
if cls._unit_circle_righthalf is None:
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = SQRTHALF * MAGIC
vertices = np.array(
[[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[0.0, -1.0]],
float)
codes = cls.CURVE4 * np.ones(14)
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
return cls._unit_circle_righthalf
@classmethod
def arc(cls, theta1, theta2, n=None, is_wedge=False):
"""
Return an arc on the unit circle from angle
*theta1* to angle *theta2* (in degrees).
*theta2* is unwrapped to produce the shortest arc within 360 degrees.
That is, if *theta2* > *theta1* + 360, the arc will be from *theta1* to
*theta2* - 360 and not a full circle plus some extra overlap.
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
Masionobe, L. 2003. `Drawing an elliptical arc using
polylines, quadratic or cubic Bezier curves
<http://www.spaceroots.org/documents/ellipse/index.html>`_.
"""
halfpi = np.pi * 0.5
eta1 = theta1
eta2 = theta2 - 360 * np.floor((theta2 - theta1) / 360)
# Ensure 2pi range is not flattened to 0 due to floating-point errors,
# but don't try to expand existing 0 range.
if theta2 != theta1 and eta2 <= eta1:
eta2 += 360
eta1, eta2 = np.deg2rad([eta1, eta2])
# number of curve segments to make
if n is None:
n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
if n < 1:
raise ValueError("n must be >= 1 or None")
deta = (eta2 - eta1) / n
t = np.tan(0.5 * deta)
alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
steps = np.linspace(eta1, eta2, n + 1, True)
cos_eta = np.cos(steps)
sin_eta = np.sin(steps)
xA = cos_eta[:-1]
yA = sin_eta[:-1]
xA_dot = -yA
yA_dot = xA
xB = cos_eta[1:]
yB = sin_eta[1:]
xB_dot = -yB
yB_dot = xB
if is_wedge:
length = n * 3 + 4
vertices = np.zeros((length, 2), float)
codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
vertices[1] = [xA[0], yA[0]]
codes[0:2] = [cls.MOVETO, cls.LINETO]
codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
vertex_offset = 2
end = length - 2
else:
length = n * 3 + 1
vertices = np.empty((length, 2), float)
codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
vertices[0] = [xA[0], yA[0]]
codes[0] = cls.MOVETO
vertex_offset = 1
end = length
vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
vertices[vertex_offset+2:end:3, 0] = xB
vertices[vertex_offset+2:end:3, 1] = yB
return cls(vertices, codes, readonly=True)
@classmethod
def wedge(cls, theta1, theta2, n=None):
"""
Return a wedge of the unit circle from angle
*theta1* to angle *theta2* (in degrees).
*theta2* is unwrapped to produce the shortest wedge within 360 degrees.
That is, if *theta2* > *theta1* + 360, the wedge will be from *theta1*
to *theta2* - 360 and not a full circle plus some extra overlap.
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
"""
return cls.arc(theta1, theta2, n, True)
_hatch_dict = maxdict(8)
@classmethod
def hatch(cls, hatchpattern, density=6):
"""
Given a hatch specifier, *hatchpattern*, generates a Path that
can be used in a repeated hatching pattern. *density* is the
number of lines per unit square.
"""
from matplotlib.hatch import get_path
if hatchpattern is None:
return None
hatch_path = cls._hatch_dict.get((hatchpattern, density))
if hatch_path is not None:
return hatch_path
hatch_path = get_path(hatchpattern, density)
cls._hatch_dict[(hatchpattern, density)] = hatch_path
return hatch_path
def clip_to_bbox(self, bbox, inside=True):
"""
Clip the path to the given bounding box.
The path must be made up of one or more closed polygons. This
algorithm will not behave correctly for unclosed paths.
If *inside* is `True`, clip to the inside of the box, otherwise
to the outside of the box.
"""
# Use make_compound_path_from_polys
verts = _path.clip_path_to_rect(self, bbox, inside)
paths = [Path(poly) for poly in verts]
return self.make_compound_path(*paths)
def get_path_collection_extents(
master_transform, paths, transforms, offsets, offset_transform):
"""
Given a sequence of :class:`Path` objects,
:class:`~matplotlib.transforms.Transform` objects and offsets, as
found in a :class:`~matplotlib.collections.PathCollection`,
returns the bounding box that encapsulates all of them.
*master_transform* is a global transformation to apply to all paths
*paths* is a sequence of :class:`Path` instances.
*transforms* is a sequence of
:class:`~matplotlib.transforms.Affine2D` instances.
*offsets* is a sequence of (x, y) offsets (or an Nx2 array)
*offset_transform* is a :class:`~matplotlib.transforms.Affine2D`
to apply to the offsets before applying the offset to the path.
The way that *paths*, *transforms* and *offsets* are combined
follows the same method as for collections. Each is iterated over
independently, so if you have 3 paths, 2 transforms and 1 offset,
their combinations are as follows:
(A, A, A), (B, B, A), (C, A, A)
"""
from .transforms import Bbox
if len(paths) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_path.get_path_collection_extents(
master_transform, paths, np.atleast_3d(transforms),
offsets, offset_transform))
def get_paths_extents(paths, transforms=[]):
"""
Given a sequence of :class:`Path` objects and optional
:class:`~matplotlib.transforms.Transform` objects, returns the
bounding box that encapsulates all of them.
*paths* is a sequence of :class:`Path` instances.
*transforms* is an optional sequence of
:class:`~matplotlib.transforms.Affine2D` instances to apply to
each path.
"""
from .transforms import Bbox, Affine2D
if len(paths) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_path.get_path_collection_extents(
Affine2D(), paths, transforms, [], Affine2D()))
| gpl-3.0 |
feilchenfeldt/pypopgen | modules/tensorfstats.py | 1 | 39058 | """
Tensor based implementation of f-statistics to
simultaneously calculate many comparisons.
Works on VCF files.
For memory efficiency, weighted block-jackknifing is
currently done across whole chromosomes.
F-test was compared to previous implementation (which was checked
against Admixtools). Results were extremely close, e.g.: D +- 0.25%
To run map function of these classes with ipython parallel or
multiprocessing, one might need to use a pickling tool other than
the standard pickling module. The module dill works fine for me.
#!!! NaN handling breaks F4ratio!!!!
# make sure that the same rows are removed in all subsamples!
"""
import logging
import numpy as np
import pandas as pd
import os
#local modules
import vcfpandas as vp
import treetools
logger = logging.getLogger()
logging.basicConfig(
    format='%(levelname)-8s %(asctime)s %(filename)s %(message)s')
#logging.basicConfig(format='%(levelname)-8s %(asctime)s %(funcName)20s() %(message)s')
logger.setLevel(logging.WARNING)
class Test(object):
x = 'Test'
def test(self, i):
return i + self.x
#ACCOUNT FOR NA here?
def run_parallel(self, rc):
rc[:].push({'x': 22})
rc[:].use_cloudpickle()
lv = rc.load_balanced_view()
m = lv.map_async(lambda c: x, range(5))
#return m.result
return m.result
class Ftest(object):
"""
This is the base class for f-tests. Specific
test classes such as Dtest, F3test, F4test, ...
derive from it.
Performs tests for all combinations of h1, h2, h3, (h4).
------------------
Parameters:
vcf_filename : vcf.gz filename. Should be tabix indexed
for random access.
ind_to_pop : dictionary that maps individuals to populations
if each individual is its on population this can also
be a list of individuals
reduce_dim : If true, remove dimensions with length 1 (not implemented).
TODO:
Samples should be defined from the h1s, h2s etc
so that samples in ind_to_pop that are not needed are ignored...
"""
ftype = None
def __init__(self, vcf_filename,
ind_to_pop=None,
result_filebase=None,
reduce_dim=False, haploid=False):
self.vcf_filename = vcf_filename
try:
self.samples = ind_to_pop.keys()
except AttributeError:
ind_to_pop = {k:k for k in ind_to_pop}
self.samples = ind_to_pop.keys()
self.ind_to_pop = ind_to_pop
self.result_filebase = result_filebase
self.haploid = haploid
@staticmethod
def get_hap_df(t0, t1):
"""
        Takes 0/1 DataFrames for the two haplotypes
        per individual and combines them into a single
        haplotype DataFrame.
"""
# if len(t0):
# t0.columns = pd.MultiIndex.from_arrays(
# [t0.columns, [0] * len(t0.columns)])
# if len(t1):
# t1.columns = pd.MultiIndex.from_arrays(
# [t1.columns, [1] * len(t1.columns)])
hap_df = pd.concat([t0, t1], axis=1).sortlevel(axis=1)
        # it is enough to drop NAs in the allele frequency!
#hap_df = hap_df.dropna(axis=0)
return hap_df
#THINK ABOUT NA handling for this functions
@staticmethod
def get_af(hap_df, ind_to_pop):
if len(hap_df):
af = hap_df.groupby(level=0, axis=1).mean()
af = af.groupby(ind_to_pop, axis=1).mean()
else:
af = pd.DataFrame(columns=set(ind_to_pop.values()))
return af#dropna()
@staticmethod
def get_ac(hap_df, ind_to_pop):
if len(hap_df):
ac = hap_df.groupby(level=0, axis=1).sum()
ac = ac.groupby(ind_to_pop, axis=1).sum()
else:
ac = pd.DataFrame(columns=set(ind_to_pop.values()))
return ac#.dropna()
@staticmethod
def get_n(hap_df, ind_to_pop):
"""
Get the number of haplotypes per population.
"""
if len(hap_df):
#ACCOUNT FOR NA
n = hap_df.groupby(level=0, axis=1).apply(lambda df: df.notnull().sum(axis=1))
n = n.groupby(ind_to_pop).sum()
else:
n = pd.Series(index=set(ind_to_pop.values()))
return n.dropna()
@staticmethod
def fly_reduce_fun(chunk_res, result=None):
if result is None:
return chunk_res
else:
return result + chunk_res
def map(self, chromosomes, start=None, end=None,
map_fun=map, get_result_fun=lambda r:r, chunksize=50000,
return_result=True, save_result=False):
"""
chromosomes : list of chromosome names as in the vcf file
to run the analysis on
        start : Start at this position on each chromosome
        end : End at this position on each chromosome
If start and end are not given, the whole chromosome
is used.
map_fun : The mapping function that will be used
to map calculations to chromosomes.
Could for instance be multiprocessing.map_async
or ipython parallel map_async. Default: map
get_result_fun : The function that receives the result
from the output of the mapping function
For map this would simply be the identity (default)
or for ipython parallel lv.map_async it is
lambda r: r.result() (or lambda r: r.result for older versions)
chunksize : Number of lines in the input vcf to read per chunk
if jackknife_levels is 'chunk' this also determines
the block-jackknife block.
return_result : Whether to return the result for each chromosme
to the mapping function. For very large results
it can be more stable and memory efficient to
set return_result=False and save_result=True
save_result : whether or not to save the result for each chromosome
to disk
"""
assert return_result or save_result
if save_result:
assert self.result_filebase is not None
result_filebase = self.result_filebase
self.chromosomes = chromosomes
self.get_result_fun = get_result_fun
#calc_stat = lambda chunk1, chunk2: self.calc_stat_static(chunk1, chunk2, ind_to_pop, h1s, h2s, h3s, h4s)
#params = {'vcf_filename':self.vcf_filename,
# 'calc_stat':calc_stat, 'ind_to_pop': self.ind_to_pop, 'h1s':self.h1s,
# 'h2s':self.h2s, 'h3s': self.h3s, 'h4s': self.h4s,
# 'samples':self.samples,'fly_reduce_fun':self.fly_reduce_fun,
# 'chunksize':self.chunksize,'mr_haplo_fun':vp.map_fly_reduce_haplo}
vcf_filename = self.vcf_filename
ind_to_pop = self.ind_to_pop
samples = self.samples
fly_reduce_fun = self.fly_reduce_fun
mr_haplo_fun = vp.map_fly_reduce_haplo
calc_stat = self.get_calc_stat(*self.calc_params)
def calc_fstat_fun(chrom):
r = mr_haplo_fun(vcf_filename.format(str(chrom)), calc_stat,
samples_h0=samples,
samples_h1=samples if not self.haploid else None,
chrom=str(chrom), start=start, end=end,
fly_reduce_fun=fly_reduce_fun,
chunksize=chunksize)
if save_result:
np.save(result_filebase+'_'+str(chrom), r)
#return_result = True
if return_result:
return r
self.map_result = map_fun(calc_fstat_fun, chromosomes)
#self.map_result = 'bla'
return self.map_result
def progress(self):
return self.map_result.progress
def load_result_chrom(self, chrom):
r = np.load(self.result_filebase+'_'+str(chrom)+'.npy')
return r
def load_result(self):
res = np.array([self.load_result_chrom(c) for c in self.chromosomes])
return res
def get_result(self):
#try:
# se.result(1)
#except TimeoutError:
# logger.INFO('Not finished, status is: {}'.format({i:s.count(i) for i in set(s)}))
# return
try:
res = np.array(self.get_result_fun(self.map_result))
except AttributeError:
res = self.load_result()
if res[0] is None:
res = self.load_result()
stat = self.get_stat(res)
zscores = self.get_zscores(res, stat)
stat_df = self.get_stat_df(stat, zscores)
self.stat_df = stat_df
return stat_df
class PairwiseDiff(Ftest):
"""
Parameters:
    h1s : list of population names to compute pairwise differences between
vcf_filename : vcf.gz filename. Should be tabix indexed
for random access.
ind_to_pop : dictionary that maps individuals to populations
if each individual is its on population this can also
be a list of individuals
reduce_dim : If true, remove dimensions with length 1 (not implemented).
"""
ftype = 'pwd'
def __init__(self, vcf_filename, ind_to_pop, h1s, **kwa):
self.h1s = h1s
self.h2s = None
self.h3s = None
self.h4s = None
try:
to_delete = []
for k,v in ind_to_pop.iteritems():
if v not in h1s:
to_delete.append(k)
for k in to_delete:
del ind_to_pop[k]
except AttributeError:
pass
Ftest.__init__(self, vcf_filename, ind_to_pop, **kwa)
self.calc_params = (self.ind_to_pop, self.h1s)
@staticmethod
def fly_reduce_fun(chunk_res, result=None):
"""
Function that reduces on the fly by summing
"""
if result is None:
return chunk_res
else:
return result + chunk_res
@staticmethod
def calc_stat_static(chunk1, chunk2, ind_to_pop, h1s, *args):
hap_df = Ftest.get_hap_df(chunk1, chunk2)
if len(hap_df):
groups = hap_df.groupby(ind_to_pop, axis=1, level=0)
#ids = groups.apply(lambda df:np.nan).index.values
return calc.divergence(groups)
else:
            n_pops = len(set(ind_to_pop.values()))
            return np.zeros((n_pops, n_pops))
@staticmethod
def get_calc_stat(*args):
def calc_stat(chunk1, chunk2):
return PairwiseDiff.calc_stat_static(chunk1, chunk2, *args)
return calc_stat
@staticmethod
def jackknife(res, i):
return np.sum(res[np.arange(len(res))!=i, 0], axis=0)/np.sum(res[np.arange(len(res))!=i, 1], axis=0)
@staticmethod
def get_stat(res):
return np.sum(res, axis=0)
@staticmethod
def get_zscores(res, pwd):
return None
@staticmethod
def get_stat_df_static(pwd, ind_to_pop):
s = sorted(set(ind_to_pop.values()))
return pd.DataFrame(pwd, index=s, columns=s)
def get_stat_df(self, stat, zscores):
return self.get_stat_df_static(stat, self.ind_to_pop)
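# Hedged usage sketch (not part of the original code base): exercises only the
# static pieces of PairwiseDiff defined above, on made-up per-chromosome
# pairwise-difference matrices; the population names are hypothetical.
def _example_pairwise_diff_reduction():
    chrom_results = [np.array([[10., 4.], [4., 12.]]),
                     np.array([[8., 3.], [3., 9.]])]
    res = np.array(chrom_results)
    total = PairwiseDiff.get_stat(res)  # element-wise sum over chromosomes
    ind_to_pop = {'ind1': 'popA', 'ind2': 'popB'}
    # labelled matrix: pi on the diagonal, dxy off the diagonal
    return PairwiseDiff.get_stat_df_static(total, ind_to_pop)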
class F3test(Ftest):
"""
Parameters:
hs3s : list of sample names to use as h3
This is the branch called 'C' in
Patterson et al. 2012 Genetics
hs1s : list of sample names to use as h1
hs2s : list of sample names to use as h2
vcf_filename : vcf.gz filename. Should be tabix indexed
for random access.
ind_to_pop : dictionary that maps individuals to populations
                if each individual is its own population this can also
be a list of individuals
reduce_dim : If true, remove dimensions with length 1 (not implemented).
"""
ftype = 'F3'
def __init__(self, vcf_filename, ind_to_pop, h3s, h1s, h2s, do_drop_self_comparisons=False, **kwa):
self.h1s = h1s
self.h2s = h2s
self.h3s = h3s
self.do_drop_self_comparisons = do_drop_self_comparisons
Ftest.__init__(self, vcf_filename, ind_to_pop, **kwa)
self.calc_params = (self.ind_to_pop, self.h1s, self.h2s, self.h3s)
@staticmethod
def fly_reduce_fun(chunk_res, result=None):
"""
Function that reduces on the fly by summing
"""
if result is None:
return chunk_res
else:
return result + chunk_res
@staticmethod
def calc_stat_static(chunk1, chunk2, ind_to_pop, h1s, h2s, h3s, *args):
hap_df = Ftest.get_hap_df(chunk1, chunk2)
af = Ftest.get_af(hap_df, ind_to_pop)
ac = Ftest.get_ac(hap_df, ind_to_pop)
n = Ftest.get_n(hap_df, ind_to_pop)
if len(af):
return calc.f3(ac[h3s], n[h3s], af[h1s], af[h2s])
else:
return np.zeros((len(h3s),len(h1s), len(h2s)))
@staticmethod
def get_calc_stat(*args):
def calc_stat(chunk1, chunk2):
return F3test.calc_stat_static(chunk1, chunk2, *args)
return calc_stat
@staticmethod
def jackknife(res, i):
return np.sum(res[np.arange(len(res))!=i, 0], axis=0)/np.sum(res[np.arange(len(res))!=i, 1], axis=0)
@staticmethod
def get_stat(res):
return np.sum(res, axis=0)
@staticmethod
def get_zscores(res, pwd):
return None
@staticmethod
def drop_self_comparisons_static(stat_df, h1s, h2s, h3s):
dup_sample_indices = [(h3, h1, h2) for h3 in h3s for h1 in h1s for h2 in h2s if h3==h1 or h3==h2 or h1==h2]
df = stat_df.drop(dup_sample_indices)
return df
def drop_self_comparisons(self):
df = self.drop_self_comparisons_static(self.stat_df, self.h1s, self.h2s, self.h3s)
self.stat_df_drop = df
return self.stat_df_drop
@staticmethod
def get_stat_df_static(f3s, stat_name, do_drop_self_comparisons, h1s, h2s, h3s):
df = pd.DataFrame(f3s.flatten(),
index=pd.MultiIndex.from_tuples([(h3,h1,h2) for h3 in h3s for h1 in h1s for h2 in h2s]),
columns=[stat_name])
df.index.names = ['h3','h1','h2']
if do_drop_self_comparisons:
            df = F3test.drop_self_comparisons_static(df, h1s, h2s, h3s)
return df
def get_stat_df(self, stat, zscores):
return self.get_stat_df_static(stat, self.ftype, self.do_drop_self_comparisons, self.h1s, self.h2s, self.h3s)
class Dtest(Ftest):
"""
Parameters:
hs1s : list of sample names to use as h1
hs2s : list of sample names to use as h2
hs3s : list of sample names to use as h3
hs4s : list of sample names to use as h4
vcf_filename : vcf.gz filename. Should be tabix indexed
for random access.
ind_to_pop : dictionary that maps individuals to populations
                if each individual is its own population this can also
                be a list of individuals
    reduce_dim : If true, remove dimensions with length 1 (not implemented).
    jackknife_levels : 'chrom' ... weighted block-jackknife across whole chromosomes
                       'chunk' ... block-jackknife across chunks of chunksize snps
This can be very memory intensive.
"""
ftype = 'D'
def __init__(self, vcf_filename, ind_to_pop, h1s, h2s, h3s, h4s, **kwa):
self.h1s = h1s
self.h2s = h2s
self.h3s = h3s
self.h4s = h4s
Ftest.__init__(self, vcf_filename, ind_to_pop, **kwa)
self.calc_params = (self.ind_to_pop, self.h1s, self.h2s, self.h3s, self.h4s)
@staticmethod
def fly_reduce_fun(chunk_res, result=None):
"""
Function that reduces on the fly by summing
and also implements a counter of chunks
so that weighted jackknifing can be performed.
"""
if result is None:
return (chunk_res[0], chunk_res[1], 1)
else:
return (result[0] + chunk_res[0], result[1] + chunk_res[1], result[2] + 1)
@staticmethod
def calc_stat_static(chunk1, chunk2, ind_to_pop, h1s, h2s, h3s, h4s):
hap_df = Dtest.get_hap_df(chunk1, chunk2)
af = Dtest.get_af(hap_df, ind_to_pop)
if len(af):
return calc.d(af[h1s], af[h2s], af[h3s], af[h4s])
else:
return np.zeros((len(h1s),len(h2s),len(h3s),len(h4s))), np.zeros((len(h1s),len(h2s),len(h3s),len(h4s)))
@staticmethod
def get_calc_stat(*args):
def calc_stat(chunk1, chunk2):
return Dtest.calc_stat_static(chunk1, chunk2, *args)
return calc_stat
@staticmethod
def jackknife(res, i):
return np.sum(res[np.arange(len(res))!=i, 0], axis=0)/np.sum(res[np.arange(len(res))!=i, 1], axis=0)
@staticmethod
def get_stat(res):
d = np.sum(res[:,0], axis=0)*1./np.sum(res[:,1], axis=0)
return d
@staticmethod
def get_zscores(res, d, weights=None):
jackknife_estimates = [Dtest.jackknife(res, i) for i in np.arange(len(res))]
if weights is None:
weights = res[:,2]*1./np.sum(res[:,2])
average = np.average(jackknife_estimates, axis=0, weights=weights)
variance = np.average(1.*(jackknife_estimates - average)**2, axis=0, weights=weights).astype(float)
try:
zscores = d * 1. / ( np.sqrt(variance) * np.sqrt(len(jackknife_estimates)-1) )
except AttributeError, e:
print variance.shape
print np.sqrt(5.)
print variance.max()
print variance.min()
#print np.sqrt(variance)
return variance
raise e
return zscores
@staticmethod
def get_stat_df_static(stat, zscores, h1s, h2s, h3s, h4s, stat_name):
stat_s = pd.Series(stat.flatten(),
index=pd.MultiIndex.from_tuples([(h1,h2,h3,h4) for \
h1 in h1s for h2 in h2s for h3 in h3s for h4 in h4s]))
stat_s.name = stat_name
z_s = pd.Series(zscores.flatten(),
index=pd.MultiIndex.from_tuples([(h1,h2,h3,h4) for \
h1 in h1s for h2 in h2s for h3 in h3s for h4 in h4s]))
z_s.name = 'Z'
stat_df = pd.concat([stat_s, z_s], axis=1)
stat_df.sort_values(stat_name, ascending=False, inplace=True)
return stat_df
def get_stat_df(self,stat, zscores):
return self.get_stat_df_static(stat, zscores, self.h1s, self.h2s, self.h3s, self.h4s, self.ftype)
@staticmethod
def drop_self_comparisons_static(stat_df, h1s, h2s, h3s, h4s):
dup_sample_indices = [(h1, h2, h3, h4) for h1 in h1s for h2 in h2s for h3 in h3s for h4 in h4s \
if h3==h1 or h3==h2 or h1==h2 or h3==h4 or h1==h4 or h2==h4]
df = stat_df.drop(dup_sample_indices)
return df
def drop_self_comparisons(self):
df = self.drop_self_comparisons_static(self.stat_df, self.h1s, self.h2s, self.h3s, self.h4s)
self.stat_df_drop = df
return self.stat_df_drop
def get_consistent_with_tree(self, ete_tree):
"""
Get a data frame with the subset
of tuples that are consistent with a
given ete tree.
Parameters:
ete_tree : ete3 tree object of all samples.
Needs to be rooted and include
all outgroups..
"""
self.stat_df_consist = treetools.get_consistent_df(self.stat_df, ete_tree)
return self.stat_df_consist
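# Hedged sketch (not part of the original module): shows how the
# (numerator sum, denominator sum, chunk count) tuples accumulated by
# Dtest.fly_reduce_fun feed the weighted block-jackknife in get_stat and
# get_zscores. Real entries are 4-dimensional arrays; scalars are used here
# only to keep the example small.
def _example_dtest_jackknife():
    res = np.array([[0.12, 1.00, 5.],   # one row per chromosome
                    [0.08, 0.90, 4.],
                    [0.15, 1.10, 6.]])
    d = Dtest.get_stat(res)        # genome-wide D = sum(num) / sum(denom)
    z = Dtest.get_zscores(res, d)  # leave-one-chromosome-out jackknife z-score
    return d, z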
class F4ratio(Dtest):
"""
Parameters:
hs1s : list of sample names to use as h1
hs2s : list of sample names to use as h2
hs3s : list of sample names to use as h3
hs4s : list of sample names to use as h4
vcf_filename : vcf.gz filename. Should be tabix indexed
for random access.
ind_to_pop : dictionary that maps individuals to populations
                if each individual is its own population this can also
                be a list of individuals
    reduce_dim : If true, remove dimensions with length 1 (not implemented).
    jackknife_levels : 'chrom' ... weighted block-jackknife across whole chromosomes
                       'chunk' ... block-jackknife across chunks of chunksize snps
This can be very memory intensive.
"""
ftype = 'F4ratio'
def __init__(self, vcf_filename, ind_to_pop, h1s, h2s, h3s, h4s, subsampling_method='per_chunk_replace', **kwa):
Dtest.__init__(self, vcf_filename, ind_to_pop, h1s, h2s, h3s, h4s, **kwa)
pop_to_hap = {pop:[] for pop in set(self.ind_to_pop.values())}
for s, pop in self.ind_to_pop.iteritems():
pop_to_hap[pop].append((s, 0))
pop_to_hap[pop].append((s, 1))
self.pop_to_hap = pop_to_hap
self.subsampling_method = subsampling_method
self.calc_params = (self.ind_to_pop, self.pop_to_hap, self.h1s, self.h2s,
self.h3s, self.h4s, self.subsampling_method)
@staticmethod
def get_af_hap(hap_df, hap_to_pop):
if len(hap_df):
af = hap_df.groupby(hap_to_pop, axis=1).mean()
else:
af = pd.DataFrame(columns=set(hap_to_pop.values()))
return af
@staticmethod
def calc_stat_static(chunk1, chunk2, ind_to_pop, pop_to_hap, h1s, h2s, h3s, h4s, subsampling_method):
hap_df = F4ratio.get_hap_df(chunk1, chunk2)
af = F4ratio.get_af(hap_df, ind_to_pop)
#do the random subsets for each chunk independently
if subsampling_method == 'per_chunk_noreplace' or \
subsampling_method == 'per_chunk_replace':
#r00 = os.urandom(3)
#r0 = int(r00.encode('hex'), 16)
#r1 = int(np.ceil(hap_df.sum().sum()/1111.))
#np.random.seed(int(r0*r1))
hap_to_pop_a = {}
hap_to_pop_b = {}
for h3 in h3s:
samples = pop_to_hap[h3]
sample_idx = np.arange(len(samples))
#try:
# ixa = np.random.choice(sample_idx, len(samples)/2, replace=False)
#except ValueError, e:
# raise e
ixa = np.random.choice(sample_idx, len(samples)/2, replace=False)
if subsampling_method == 'per_chunk_noreplace':
ixb = [i for i in sample_idx if i not in ixa]
else:
ixb = np.random.choice(sample_idx, len(samples)/2, replace=False)
#ixb = np.random.choice(sample_idx, len(samples)/2, replace=False)
hap_to_pop_a.update({samples[i]: h3 for i in ixa})
hap_to_pop_b.update({samples[i]: h3 for i in ixb})
af3_a = F4ratio.get_af_hap(hap_df, hap_to_pop_a)[h3s]
af3_b = F4ratio.get_af_hap(hap_df, hap_to_pop_b)[h3s]
#hap_df[samples_a].mean(axis=1)
#af3_b = hap_df[samples_b].mean(axis=1)
#af_sub = F4ratio.get_af_hap(hap_df, hap_to_pop_ab)
elif subsampling_method == 'no_subsampling':
#this is equivalent to f_hom from Martin, Davey, Jiggins
af3_a = af[h3s]
af3_b = af[h3s]
if len(af):
#here we remove all SNP sites that contain
#nans in any population. This is unfortunate,
            #because it loses info (for the comparisons without nans)
#but np.einsum cannot handle any nans.
def nn(df):
return df.notnull().all(axis=1)
nnl = nn(af[h1s])&nn(af[h2s])&nn(af[h3s])&nn(af3_a)&nn(af3_b)&nn(af[h4s])
return calc.f4ratio(af[nnl][h1s], af[nnl][h2s], af[nnl][h3s], af3_a[nnl], af3_b[nnl], af[nnl][h4s])
else:
return np.zeros((len(h1s),len(h2s),len(h3s),len(h4s))), np.zeros((len(h1s),len(h2s),len(h3s),len(h4s)))
@staticmethod
def get_calc_stat(*args):
def calc_stat(chunk1, chunk2):
return F4ratio.calc_stat_static(chunk1, chunk2, *args)
return calc_stat
class F4ratioH3derived(Dtest):
"""
An implementation of the f4ratio where
only sites are considered where h3 is derived.
"""
pass
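# Hedged sketch (an assumption, not the authors' implementation): the
# derived-site restriction for F4ratioH3derived could be expressed as a filter
# on the allele-frequency frame before the f4-ratio terms are computed, e.g.
# keeping only SNPs where every h3 population carries the derived allele.
def _example_h3_derived_filter(af, h3s):
    derived = (af[h3s] > 0).all(axis=1)
    return af[derived]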
class calc:
"""
This is a container for
the basic functions that do the
calculations.
Not to be instantiated.
ATTENTION:
All the functions that use
einsum produce nans for any product
with missing data.
Missing data should be removed beforehand.
"""
@staticmethod
def pwd(af1, af2):
"""
ATTENTION pi needs to be corrected for resampling
similar to f3!!!!!!
Calculate pairwise differences (pi and dxy).
Input can be np.ndarray or pd.DataFrame.
Rows are allele frequencies or haplotypes for variants.
Columns are individuals or populations.
Rows containing np.nan should be removed beforehand.
Result:
Diagonal entries are pi = 2p(1-p).
        Off-diagonal entries are dxy = p1*(1-p2) + (1-p1)*p2
"""
return np.einsum('ij,ik->jk',af1, 1-af2) + np.einsum('ij,ik->jk',1-af1, af2)
# @staticmethod
# def pwd0(af_df):
# """
# Calculate pairwise differences (pi and dxy).
#
# Input can be np.ndarray or pd.DataFrame.
# Rows are allele frequencies or haplotypes for variants.
# Columns are individuals or populations.
#
# Rows containing np.nan should be removed beforehand.
#
# Result:
# Diagonal entries are pi = 2p(1-p).
# Off-diagonal entries are dxy = pq
#
#
# """
# pw_af = np.einsum('ij,ik->jk',af_df, 1-af_df)
#
# try:
# pw_af = pd.DataFrame(pw_af, index = af_df.columns, columns = af_df.columns)
# except AttributeError:
# pass
#
# return pw_af + pw_af.T
@staticmethod
def divergence(groups):
"""
!!! careful the ordering of groups can be surprising.
use [n for n,_ in groups] to get the axis labels of result
Calculate pairwise differences (pi and dxy).
This function returns an unbiased estimate
for pi.
Rows containing np.nan should be removed beforehand.
Parameters:
groups ... grouped pandas dataframe that is grouped into
populations. Populations could also be the
                    two haplotypes of a single individual.
Result:
pd.DataFrame of nucleotide diversity pi (diagonal)
and divergence dxy (off-diagonal)
Diagonal entries are pi = 2p(1-p*),
where p* = (allele count - 1) / (n samples - 1)
Off-diagonal entries are dxy = p1*(1-p2) + (1-p1)*p2
"""
af = groups.mean()
ac = groups.sum()
n = groups.apply(lambda df: df.notnull().sum(axis=1))
#handle missing data
af = af.dropna(axis=0)
ac = ac.dropna(axis=0)
n = n.loc[af.index]
pi = np.zeros((af.shape[1], af.shape[1]))
np.fill_diagonal(pi,(2*af*(1-(ac-1.)/(n-1))).sum())
dxy = np.einsum('ij,ik->jk',af, 1-af) + np.einsum('ij,ik->jk',1-af, af)
np.fill_diagonal(dxy, 0)
divergence = pi + dxy
return divergence
@staticmethod
def f2(groups1, groups2):
"""
        Calculate the f2 statistic as defined in
        Patterson et al. Genetics 2012, using the
        unbiased estimator of Appendix A.
        Input is 2 pd.DataFrames grouped into
        populations using DataFrame.groupby.
        In the original data frames rows = SNPs,
        columns = haplotypes, with data for all
        individuals/populations which should
        be tested for h1 and h2, respectively.
        Returns:
        A pandas Series with a 2-level index (h1, h2)
        taken from the columns of groups1 and groups2,
        respectively. Self-comparisons are set to zero.
"""
af1 = groups1.mean()
af2 = groups2.mean()
n1 = groups1.apply(lambda df: df.notnull().sum(axis=1))
n2 = groups2.apply(lambda df: df.notnull().sum(axis=1))
dummy1 = np.ones(af1.shape[1])
dummy2 = np.ones(af2.shape[1])
r = - 2 * np.einsum('ij,ik->jk',af1, af2) \
+ np.einsum('ij,ij,k->jk',af1, af1-(1-af1)/(n1-1), dummy2) \
+ np.einsum('j,ik,ik->jk',dummy1, af2, af2-(1-af2)/(n2-1))
f2_s = pd.Series(r.flatten(),
index= pd.MultiIndex.from_tuples([(h1,h2) \
for h1 in af1.columns for h2 in af2.columns]))
f2_s.index.names = ['h1','h2']
# The estimator above is unbiased, except for self-comparisons.
# Self-comparisons should be zero by definition
f2_s[f2_s.index.get_level_values(0) == f2_s.index.get_level_values(1)] = 0
return f2_s
@staticmethod
def f3(ac3, n3, af1, af2):
"""
Calculate the f3 statistic as defined in
        Patterson et al. Genetics 2012.
This corresponds to the unbiased estimator
in Patterson et al. 2012 Appendix A.
af3 corresponds to population 'C' in
Patterson et al.
Input is 3 np.ndarrays or pd.DataFrames
for each of which rows = SNPs,
columns = allele frequencies,
with data for all individuals/populations
which should
be tested for h1, h2, h3, respectively.
Genotypes or haplotypes can be converted
to allele frequencies as 0, 0.5, 1 and 0, 1,
respectively.
Output is a three dimensional np.ndarray,
        j x k x l, where the lengths of the 3
        axes are the numbers of populations in af3,
        af1, af2, respectively.
"""
af3 = ac3*1./n3
dummy3 = np.ones(af3.shape[1])
dummy1 = np.ones(af1.shape[1])
dummy2 = np.ones(af2.shape[1])
return np.einsum('ij,ij,k,l->jkl',af3, (ac3-1)/(n3-1), dummy1, dummy2) \
- np.einsum('ij,ik,l->jkl',af3, af1, dummy2) \
- np.einsum('ij,k,il->jkl',af3, dummy1, af2) \
+ np.einsum('j,ik,il->jkl',dummy3, af1, af2)
@staticmethod
def f3b(groups3, groups1, groups2):
"""
Calculate the f3 statistic as defined in
        Patterson et al. Genetics 2012.
This corresponds to the unbiased estimator
in Patterson et al. 2012 Appendix A.
However, it is not unbiased for entries
where group1 and group2 correspond to
the identical population. To get an un-biased
estimate for such cases use f2. f3 reduced to
f2 if 1==2.
af3 corresponds to population 'C' in
Patterson et al.
Input is 3 pd.DataFrames grouped into populations
using DataFrame.groupby.
In the original data frame rows = SNPs,
columns = haplotypes,
with data for all individuals/populations
which should
be tested for h1, h2, h3, respectively.
Returns:
A pandas Series with a 3-level index,
corresponding to h3, h1, h2 taken from groups3,
groups1, groups2, respectively.
"""
af1 = groups1.mean()
af2 = groups2.mean()
af3 = groups3.mean()
n3 = groups3.apply(lambda df: df.notnull().sum(axis=1))
dummy3 = np.ones(af3.shape[1])
dummy1 = np.ones(af1.shape[1])
dummy2 = np.ones(af2.shape[1])
r = np.einsum('ij,ij,k,l->jkl',af3, af3-(1-af3)/(n3-1), dummy1, dummy2) \
- np.einsum('ij,ik,l->jkl',af3, af1, dummy2) \
- np.einsum('ij,k,il->jkl',af3, dummy1, af2) \
+ np.einsum('j,ik,il->jkl',dummy3, af1, af2)
f3_s = pd.Series(r.flatten(),
index= pd.MultiIndex.from_tuples([(h3,h1,h2) for h3 in af3.columns \
for h1 in af1.columns for h2 in af2.columns]))
f3_s.index.names = ['h3','h1','h2']
        # correct entries where group3 is identical to one of the other groups
# should be 0 in such a case
f3_s[ (f3_s.index.get_level_values(0) == f3_s.index.get_level_values(1)) | \
(f3_s.index.get_level_values(0) == f3_s.index.get_level_values(2))] = 0
return f3_s
@staticmethod
def f4(af1, af2, af3, af4):
"""
Calculate the f4 statistic as defined in
Patterson et al. Genetics 2012. This is
the numerator in Patterson's D statistic.
Input is 4 np.ndarrays or pd.DataFrames
for each of which rows = SNPs,
columns = allele frequencies,
with data for all individuals/populations
which should
be tested for h1, h2, h3, h4, respectively.
Genotypes or haplotypes can be converted
to allele frequencies as 0, 0.5, 1 and 0, 1,
respectively.
Output is a four dimensional np.ndarray,
j x k x l x m, where the length of the 4
axis is the number of individuals in af1,
af2, af3, af4, respectively.
"""
return (np.einsum('ij,ik,il,im->jklm',af1, 1-af2, 1-af3, af4) \
- np.einsum('ij,ik,il,im->jklm',1-af1, af2, 1-af3, af4) \
+ np.einsum('ij,ik,il,im->jklm',1-af1, af2, af3, 1-af4) \
- np.einsum('ij,ik,il,im->jklm',af1, 1-af2, af3, 1-af4))
@staticmethod
def f4ratio_denom(af1, af2, af3a, af3b, af4):
"""
        Calculate the denominator of the f4 admixture
        ratio, see Patterson et al. Genetics 2012.
        This is the f4 statistic with one random half
        of the h3 haplotypes (af3a) taking the place
        of h2 and the other half (af3b) taking the
        place of h3.
        Input is 5 np.ndarrays or pd.DataFrames
        for each of which rows = SNPs,
        columns = allele frequencies,
        with data for all individuals/populations
        which should be tested for h1, h2, h3a, h3b,
        h4, respectively; af2 only determines the
        size of a dummy axis, its values do not
        enter the calculation.
        Output is a four dimensional np.ndarray,
        j x k x l x m, matching the shape of the
        f4 numerator for h1, h2, h3, h4.
"""
dummy = np.ones(af2.shape[1])
af3a = np.array(af3a)
af3b = np.array(af3b)
return (np.einsum('ij,k,il,im->jklm',af1, dummy, (1-af3a) * (1-af3b), af4) \
- np.einsum('ij,k,il,im->jklm',1-af1, dummy, af3a * (1-af3b), af4) \
+ np.einsum('ij,k,il,im->jklm',1-af1, dummy, af3a * af3b, 1-af4) \
- np.einsum('ij,k,il,im->jklm',af1, dummy, (1-af3a) * af3b, 1-af4))
@staticmethod
def d_denom(af1, af2, af3, af4):
"""
        Calculate the denominator of Patterson's
D statistic. See
Patterson et al. Genetics 2012.
Input is 4 np.ndarrays or pd.DataFrames
for each of which rows = SNPs,
columns = allele frequencies,
with data for all individuals/populations
which should
be tested for h1, h2, h3, h4, respectively.
Genotypes or haplotypes can be converted
to allele frequencies as 0, 0.5, 1 and 0, 1,
respectively.
Output is a four dimensional np.ndarray,
j x k x l x m, where the length of the 4
axis is the number of individuals in af1,
af2, af3, af4, respectively.
"""
return (np.einsum('ij,ik,il,im->jklm',af1, 1-af2, 1-af3, af4) \
+ np.einsum('ij,ik,il,im->jklm',1-af1, af2, 1-af3, af4) \
+ np.einsum('ij,ik,il,im->jklm',1-af1, af2, af3, 1-af4) \
+ np.einsum('ij,ik,il,im->jklm',af1, 1-af2, af3, 1-af4))
@staticmethod
def d(af1, af2, af3, af4):
"""
Calculate Patterson's
D statistic. See
Patterson et al. Genetics 2012.
Input is 4 np.ndarrays or pd.DataFrames
for each of which rows = SNPs,
columns = allele frequencies,
with data for all individuals/populations
which should
be tested for h1, h2, h3, h4, respectively.
Genotypes or haplotypes can be converted
to allele frequencies as 0, 0.5, 1 and 0, 1,
respectively.
Output is a four dimensional np.ndarray,
j x k x l x m, where the length of the 4
axis is the number of individuals in af1,
af2, af3, af4, respectively.
"""
return calc.f4(af1, af2, af3, af4), calc.d_denom(af1, af2, af3, af4)
@staticmethod
def f4ratio(af1, af2, af3, af3a, af3b, af4):
"""
Calculate numerator and denominator of f4
admixture ratio. See
Patterson et al. Genetics 2012.
Input is 5 np.ndarrays or pd.DataFrames
for each of which rows = SNPs,
columns = allele frequencies,
with data for all individuals/populations
which should
be tested for h1, h2, h3a, h3b, h4, respectively.
af3a and af3b should have the same dimension.
Genotypes or haplotypes can be converted
to allele frequencies as 0, 0.5, 1 and 0, 1,
respectively.
Output is a four dimensional np.ndarray,
j x k x l x m, where the length of the 4
axis is the number of individuals in af1,
af2, af3, af4, respectively.
"""
return calc.f4(af1, af2, af3, af4), calc.f4ratio_denom(af1, af2, af3a, af3b, af4)
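# Hedged worked example (not part of the original module): tiny allele-frequency
# matrices illustrating the einsum-based statistics above. Rows are SNPs, columns
# are populations; no missing data, as the docstrings require.
def _example_calc_usage():
    af = np.array([[0.0, 0.5, 1.0, 0.0],
                   [0.5, 0.5, 0.0, 1.0],
                   [1.0, 0.0, 0.5, 0.5]])
    h1, h2, h3, h4 = (af[:, [i]] for i in range(4))
    pw = calc.pwd(af, af)                # 4 x 4: pi on the diagonal, dxy off it
    num, denom = calc.d(h1, h2, h3, h4)  # ABBA/BABA numerator and denominator
    return pw, num / denom               # 1 x 1 x 1 x 1 D statistic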
class convert:
"""
Container object with
    functions to manipulate
data frames etc.
Not to be instantiated.
"""
@staticmethod
def haplo_to_individual(pwd_haplo):
"""
Function to summarize haplotypes
of diploids to get individual
pairwise differences.
"""
pw = pwd_haplo.groupby(level=0, axis=0).mean().groupby(level=0,axis=1).mean()
return pw
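# Hedged usage sketch (assumes the haplotype-level frame is indexed by
# (individual, haplotype) tuples on both axes, which is what the
# groupby(level=0) calls above imply):
def _example_haplo_to_individual():
    idx = pd.MultiIndex.from_tuples([('ind1', 0), ('ind1', 1),
                                     ('ind2', 0), ('ind2', 1)])
    pwd_haplo = pd.DataFrame(np.arange(16, dtype=float).reshape(4, 4),
                             index=idx, columns=idx)
    return convert.haplo_to_individual(pwd_haplo)  # 2 x 2 frame of means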
#def get_sample_to_pop(populations):
# return {vi:k for k,v in populations.iteritems() for vi in v}
| mit |
ennehekma/d2d | python/plot_lambert_scan_maps.py | 4 | 6944 | '''
Copyright (c) 2014-2016 Kartik Kumar, Dinamica Srl ([email protected])
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# Plotting
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
from nlcmap import nlcmap
# I/O
import commentjson
import json
from pprint import pprint
import sqlite3
# Numerical
import numpy as np
import pandas as pd
# System
import sys
import time
print ""
print "------------------------------------------------------------------"
print " D2D "
print " 0.0.2 "
print " Copyright (c) 2015-2016, K. Kumar ([email protected]) "
print " Copyright (c) 2016, E.J. Hekma ([email protected]) "
print "------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
print ""
print "******************************************************************"
print " Input parameters "
print "******************************************************************"
print ""
# Parse JSON configuration file
# Raise exception if wrong number of inputs are provided to script
if len(sys.argv) != 2:
raise Exception("Only provide a JSON config file as input!")
json_data = open(sys.argv[1])
config = commentjson.load(json_data)
json_data.close()
pprint(config)
print ""
print "******************************************************************"
print " Operations "
print "******************************************************************"
print ""
print "Fetching scan data from database ..."
# Connect to SQLite database.
try:
database = sqlite3.connect(config['database'])
except sqlite3.Error, e:
print "Error %s:" % e.args[0]
sys.exit(1)
departure_epochs = pd.read_sql("SELECT DISTINCT departure_epoch \
FROM lambert_scanner_results;", \
database)
for i in xrange(0,departure_epochs.size):
c = departure_epochs['departure_epoch'][i]
print "Plotting scan map with departure epoch: ",c,"Julian Date"
# Fetch scan data.
map_order = "departure_" + config['map_order']
scan_data = pd.read_sql("SELECT departure_object_id, arrival_object_id, \
min(transfer_delta_v), "+ map_order + " \
FROM lambert_scanner_results \
WHERE departure_epoch BETWEEN " + str(c-0.00001) + " \
AND "+str(c+0.00001) +" \
GROUP BY departure_object_id, arrival_object_id;", \
database)
scan_data.columns = ['departure_object_id','arrival_object_id', \
'transfer_delta_v',str(map_order)]
scan_order = scan_data.sort_values(str(map_order)) \
.drop_duplicates('departure_object_id')[ \
['departure_object_id',str(map_order)]]
scan_map = scan_data.pivot(index='departure_object_id', \
columns='arrival_object_id',
values='transfer_delta_v')
scan_map = scan_map.reindex(index=scan_order['departure_object_id'], \
columns=scan_order['departure_object_id'])
# Set up color map.
bins = np.linspace(scan_data['transfer_delta_v'].min(), \
scan_data['transfer_delta_v'].max(), 10)
groups = scan_data['transfer_delta_v'].groupby( \
np.digitize(scan_data['transfer_delta_v'], bins))
levels = groups.mean().values
cmap_lin = plt.get_cmap(config['colormap'])
cmap = nlcmap(cmap_lin, levels)
# Plot heat map.
ax1 = plt.subplot2grid((15,15), (2, 0),rowspan=13,colspan=14)
heatmap = ax1.pcolormesh(scan_map.values, cmap=cmap, \
vmin=scan_data['transfer_delta_v'].min(), \
vmax=scan_data['transfer_delta_v'].max())
ax1.set_xticks(np.arange(scan_map.shape[1] + 1)+0.5)
ax1.set_xticklabels(scan_map.columns, rotation=90)
ax1.set_yticks([])
ax1.tick_params(axis='both', which='major', labelsize=config['tick_label_size'])
ax1.set_xlim(0, scan_map.shape[1])
ax1.set_ylim(0, scan_map.shape[0])
ax1.set_xlabel('Departure object',fontsize=config['axis_label_size'])
ax1.set_ylabel('Arrival object',fontsize=config['axis_label_size'])
# Plot axis ordering.
ax2 = plt.subplot2grid((15,15), (0, 0),rowspan=2,colspan=14,sharex=ax1)
ax2.step(np.arange(0.5,scan_map.shape[1]+.5),scan_order[str(map_order)],'k',linewidth=2.0)
ax2.get_yaxis().set_major_formatter(plt.FormatStrFormatter('%.2e'))
ax2.tick_params(axis='both', which='major', labelsize=config['tick_label_size'])
plt.setp(ax2.get_xticklabels(), visible=False)
ax2.set_ylabel(config['map_order_axis_label'],fontsize=config['axis_label_size'])
# Plot color bar.
ax3 = plt.subplot2grid((15,15), (0, 14), rowspan=15)
color_bar = plt.colorbar(heatmap,cax=ax3)
color_bar.ax.get_yaxis().labelpad = 20
color_bar.ax.set_ylabel(r'Total transfer $\Delta V$ [km/s]', rotation=270)
plt.tight_layout()
# Save figure to file.
plt.savefig(config["output_directory"] + "/" + config["scan_figure"] + "_"+str(i+1) + \
".png", dpi=config["figure_dpi"])
plt.close()
print "Figure ",i+1," generated successfully...."
print "Figure generated successfully!"
print ""
# Close SQLite database if it's still open.
if database:
database.close()
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
| mit |
tkchafin/mrbait | tests/benchmark_gff.py | 1 | 11663 | #!/usr/bin/python
from collections import Counter
from collections import defaultdict
import time
import unicodedata
import os
import sys
import pandas as pd
from collections import namedtuple
"""
Benchmarking several alternative structures for yielding GFF Records
Results:
-At small numbers, namedtuples and native lists are much faster.
-namedtuple and pandas df do have some more up-front cost in defining them
But this is inconsequential if the definition won't be repeated
-Adding in 1 access (of seqid) per iteration didn't change much.
-At high number of replicates, they all seem to take about the same time
-Container and slotContainer don't provide significant speed increase
Note that slotContainer should use less memory
Testing with namedtuples: 100 iterations
1 ms
Testing with namedtuples: 1000 iterations
3 ms
Testing with namedtuples: 10000 iterations
20 ms
Testing with namedtuples: 100000 iterations
201 ms
Testing with namedtuples: 1000000 iterations
1980 ms
Testing with pandas df: 100 iterations
15 ms
Testing with pandas df: 1000 iterations
14 ms
Testing with pandas df: 10000 iterations
34 ms
Testing with pandas df: 100000 iterations
211 ms
Testing with pandas df: 1000000 iterations
1983 ms
Testing with dict: 100 iterations
0 ms
Testing with dict: 1000 iterations
2 ms
Testing with dict: 10000 iterations
19 ms
Testing with dict: 100000 iterations
199 ms
Testing with dict: 1000000 iterations
1973 ms
Testing with container: 100 iterations
0 ms
Testing with container: 1000 iterations
2 ms
Testing with container: 10000 iterations
20 ms
Testing with container: 100000 iterations
197 ms
Testing with container: 1000000 iterations
1970 ms
Testing with slots container: 100 iterations
0 ms
Testing with slots container: 1000 iterations
2 ms
Testing with slots container: 10000 iterations
20 ms
Testing with slots container: 100000 iterations
196 ms
Testing with slots container: 1000000 iterations
1973 ms
Conclusion:
-probably will go with a native dict since namedtuple doesn't seem to speed
things up much. Pandas DF seems overboard for this simple container case
-namedtuple and dict should also theoretically take same space in mem,
although in this case we should only have 1 record in mem per thread, so
this is negligible anyways
-FINAL: If we weren't writing a generator function, and just needed to make
a fuckton of these, the slotContainerGFF object would probably be
the most memory efficient. Since we are writing a generator function
in this case, and only keeping one record in mem at a time, I think
          I'll just go with the simplest method and use a native Python dict.
"""
def time_me(method):
def wrapper(*args, **kw):
startTime = int(round(time.time() * 1000))
result = method(*args, **kw)
endTime = int(round(time.time() * 1000))
print(endTime - startTime,'ms')
return result
return wrapper
#Function to split GFF attributes
def splitAttributes(a):
ret = {}
for thing in a.split(";"):
stuff = thing.split("=")
if len(stuff) != 2: continue #Eats error silently, YOLO
key = stuff[0]
value = stuff[1]
ret[key] = value
return ret
#Class for holding GFF Record data, with mem opt using __slots__ magic
class slotContainerGFF():
__slots__ = ["seqid", "source", "type", "start", "end", "score", "strand", "phase", "attributes"]
def __init__(self, things):
self.seqid = None if things[0] == "." else things[0]
self.source = None if things[1] == "." else things[1]
self.type = None if things[2] == "." else things[2]
self.start = None if things[3] == "." else int(things[3])
self.end = None if things[4] == "." else int(things[4])
self.score = None if things[5] == "." else float(things[5])
self.strand = None if things[6] == "." else things[6]
self.phase = None if things[7] == "." else things[7]
self.attributes = None if things[8] == "." else things[8]
#Class for holding GFF Record data, no __slots__
class containerGFF():
def __init__(self, things):
self.seqid = None if things[0] == "." else things[0]
self.source = None if things[1] == "." else things[1]
self.type = None if things[2] == "." else things[2]
self.start = None if things[3] == "." else int(things[3])
self.end = None if things[4] == "." else int(things[4])
self.score = None if things[5] == "." else float(things[5])
self.strand = None if things[6] == "." else things[6]
self.phase = None if things[7] == "." else things[7]
self.attributes = None if things[8] == "." else splitAttributes(things[8])
#function to read each GFF element into a namedtuple Collections object
@time_me
def read_gff_TUPLE(g, num):
fields = ["seqid", "source", "type", "start", "end", "score", "strand", "phase", "attributes"]
GFFTuple = namedtuple("GFFRecord", fields)
gf = open(g)
with gf as file_object:
for i in range(num):
for line in file_object:
if line.startswith("#"): continue
line = line.strip()
things = line.split("\t")
if len(things) != len(fields):
sys.exit("Fatal error: GFF file is not standard-compatible")
#line = utils.removeURL(line) #Sanitize any URLs out
normalizedInfo = {
"seqid": None if things[0] == "." else things[0],
"source": None if things[1] == "." else things[1],
"type": None if things[2] == "." else things[2],
"start": None if things[3] == "." else int(things[3]),
"end": None if things[4] == "." else int(things[4]),
"score": None if things[5] == "." else float(things[5]),
"strand": None if things[6] == "." else things[6],
"phase": None if things[7] == "." else things[7],
"attributes": None if things[8] == "." else splitAttributes(things[8])
}
ret = GFFTuple(**normalizedInfo)
                test = ret.seqid
                test2 = ret.phase
ret = ""
gf.close()
#Function to read each GFF element into a pandas df
@time_me
def read_gff_PANDAS(g, num):
fields = ["seqid", "source", "type", "start", "end", "score", "strand", "phase", "attributes"]
GFFTuple = pd.DataFrame(columns=fields)
g2 = open(g)
with g2 as file_object:
for i in range(num):
for line in file_object:
if line.startswith("#"): continue
line = line.strip()
things = line.split("\t")
if len(things) != len(fields):
sys.exit("Fatal error: GFF file is not standard-compatible")
#line = utils.removeURL(line) #Sanitize any URLs out
GFFTuple["seqid"] = None if things[0] == "." else things[0]
GFFTuple["source"] = None if things[1] == "." else things[1]
GFFTuple["type"] = None if things[2] == "." else things[2]
GFFTuple["start"] = None if things[3] == "." else int(things[3])
GFFTuple["end"] = None if things[4] == "." else int(things[4])
GFFTuple["score"] = None if things[5] == "." else float(things[5])
GFFTuple["strand"] = None if things[6] == "." else things[6]
GFFTuple["phase"] = None if things[7] == "." else things[7]
GFFTuple["attributes"] = None if things[8] == "." else things[8]
things = splitAttributes(str(GFFTuple["attributes"]))
ret = GFFTuple
test = GFFTuple["seqid"]
test2 = GFFTuple["phase"]
ret = "" #This is where the yield would go
g2.close()
#Function to read each GFF element into a native dict
@time_me
def read_gff_DICT(g, num):
g3 = open(g)
with g3 as file_object:
for i in range(num):
for line in file_object:
GFFTuple = {}
if line.startswith("#"): continue
line = line.strip()
things = line.split("\t")
if len(things) != 9:
sys.exit("Fatal error: GFF file is not standard-compatible")
#line = utils.removeURL(line) #Sanitize any URLs out
GFFTuple["seqid"] = None if things[0] == "." else things[0]
GFFTuple["source"] = None if things[1] == "." else things[1]
GFFTuple["type"] = None if things[2] == "." else things[2]
GFFTuple["start"] = None if things[3] == "." else int(things[3])
GFFTuple["end"] = None if things[4] == "." else int(things[4])
GFFTuple["score"] = None if things[5] == "." else float(things[5])
GFFTuple["strand"] = None if things[6] == "." else things[6]
GFFTuple["phase"] = None if things[7] == "." else things[7]
GFFTuple["attributes"] = None if things[8] == "." else splitAttributes(things[8])
ret = GFFTuple
test = GFFTuple["seqid"]
test2 = GFFTuple["phase"]
ret = "" #This is where the yield would go
g3.close()
#Function to read each GFF element into a container object
@time_me
def read_gff_CONTAINER(g, num):
g3 = open(g)
with g3 as file_object:
for i in range(num):
for line in file_object:
if line.startswith("#"): continue
line = line.strip()
things = line.split("\t")
if len(things) != 9:
sys.exit("Fatal error: GFF file is not standard-compatible")
#line = utils.removeURL(line) #Sanitize any URLs out
GFFTuple = containerGFF(things)
ret = GFFTuple
test = GFFTuple.seqid
test2 = GFFTuple.phase
ret = "" #This is where the yield would go
g3.close()
#Function to read each GFF element into a container object that uses __slots__
@time_me
def read_gff_SLOTS(g, num):
g3 = open(g)
with g3 as file_object:
for i in range(num):
for line in file_object:
if line.startswith("#"): continue
line = line.strip()
things = line.split("\t")
if len(things) != 9:
sys.exit("Fatal error: GFF file is not standard-compatible")
#line = utils.removeURL(line) #Sanitize any URLs out
GFFTuple = slotContainerGFF(things)
attributes = splitAttributes(GFFTuple.attributes)
ret = GFFTuple
test = GFFTuple.seqid
test2 = GFFTuple.phase
ret = "" #This is where the yield would go
g3.close()
#MAIN
gff = "../examples/example.gff"
print("Testing with namedtuples: 100 iterations")
read_gff_TUPLE(gff, 100)
print("Testing with namedtuples: 1000 iterations")
read_gff_TUPLE(gff, 1000)
print("Testing with namedtuples: 10000 iterations")
read_gff_TUPLE(gff, 10000)
print("Testing with namedtuples: 100000 iterations")
read_gff_TUPLE(gff, 100000)
print("Testing with namedtuples: 1000000 iterations")
read_gff_TUPLE(gff, 1000000)
print()
print("Testing with pandas df: 100 iterations")
read_gff_PANDAS(gff, 100)
print("Testing with pandas df: 1000 iterations")
read_gff_PANDAS(gff, 1000)
print("Testing with pandas df: 10000 iterations")
read_gff_PANDAS(gff, 10000)
print("Testing with pandas df: 100000 iterations")
read_gff_PANDAS(gff, 100000)
print("Testing with pandas df: 1000000 iterations")
read_gff_PANDAS(gff, 1000000)
print()
print("Testing with dict: 100 iterations")
read_gff_DICT(gff, 100)
print("Testing with dict: 1000 iterations")
read_gff_DICT(gff, 1000)
print("Testing with dict: 10000 iterations")
read_gff_DICT(gff, 10000)
print("Testing with dict: 100000 iterations")
read_gff_DICT(gff, 100000)
print("Testing with dict: 1000000 iterations")
read_gff_DICT(gff, 1000000)
print()
print("Testing with container: 100 iterations")
read_gff_CONTAINER(gff, 100)
print("Testing with container: 1000 iterations")
read_gff_CONTAINER(gff, 1000)
print("Testing with container: 10000 iterations")
read_gff_CONTAINER(gff, 10000)
print("Testing with container: 100000 iterations")
read_gff_CONTAINER(gff, 100000)
print("Testing with container: 1000000 iterations")
read_gff_CONTAINER(gff, 1000000)
print()
print("Testing with slots container: 100 iterations")
read_gff_SLOTS(gff, 100)
print("Testing with slots container: 1000 iterations")
read_gff_SLOTS(gff, 1000)
print("Testing with slots container: 10000 iterations")
read_gff_SLOTS(gff, 10000)
print("Testing with slots container: 100000 iterations")
read_gff_SLOTS(gff, 100000)
print("Testing with slots container: 1000000 iterations")
read_gff_SLOTS(gff, 1000000)
| gpl-3.0 |
dilawar/moose-core | python/moose/helper.py | 4 | 2432 | """helper.py:
Some helper functions which are compatible with both python2 and python3.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2017-, Dilawar Singh"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import os
import re
import subprocess
def execute(cmd):
"""execute: Execute a given command.
:param cmd: string, given command.
Return:
------
Return a iterator over output.
"""
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
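# Example usage (illustrative command, not part of the module):
#
#     for line in execute(["ping", "-c", "4", "localhost"]):
#         print(line, end="")
#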
def find_files( dirname, ext=None, name_contains=None, text_regex_search=None):
files = []
for d, sd, fs in os.walk(dirname):
for f in fs:
fpath = os.path.join(d,f)
include = True
if ext is not None:
if f.split('.')[-1] != ext:
include = False
if name_contains:
if name_contains not in os.path.basename(f):
include = False
if text_regex_search:
with open(fpath, 'r' ) as f:
txt = f.read()
if re.search(text_regex_search, txt) is None:
include = False
if include:
files.append(fpath)
return files
# Matplotlib text for running simulation. It makes sure that each figure is
# saved to an individual png file.
matplotlibText = """
print( '>>>> saving all figures')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages  # needed by multipage() below
def multipage(filename, figs=None, dpi=200):
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
fig.savefig(pp, format='pdf')
pp.close()
def saveall(prefix='results', figs=None):
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for i, fig in enumerate(figs):
outfile = '%s.%d.png' % (prefix, i)
fig.savefig(outfile)
print( '>>>> %s saved.' % outfile )
plt.close()
try:
saveall()
except Exception as e:
print( '>>>> Error in saving: %s' % e )
quit(0)
"""
| gpl-3.0 |
bibsian/database-development | poplerGUI/ui_logic_splitcolumn.py | 1 | 2989 | #!/usr/bin/env python
from PyQt4 import QtGui, QtCore
from Views import ui_dialog_splitcolumn as dsplitcolumn
from poplerGUI import ui_logic_preview as tprev
from poplerGUI import class_modelviewpandas as view
from poplerGUI.logiclayer import class_helpers as hlp
from poplerGUI import class_inputhandler as ini
class SplitColumnDialog(QtGui.QDialog, dsplitcolumn.Ui_Dialog):
'''
    User logic to split a column into two based on a
    user-supplied separator (currently regex does not work).
'''
update_data = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.previous_click = False
# Place holders for user inputs
self.splitcolumnlned = {}
# Place holder: Data Model/ Data model view
self.splitcolumnmodel = None
self.viewEdit = view.PandasTableModelEdit(None)
# Placeholders: Data tables
self.splitcolumntable = None
# Actions
self.btnPreview.clicked.connect(self.submit_change)
self.btnSaveClose.clicked.connect(self.submit_change)
self.btnCancel.clicked.connect(self.close)
# Update boxes/preview box
self.message = QtGui.QMessageBox
self.error = QtGui.QErrorMessage()
self.preview = tprev.TablePreview()
def submit_change(self):
sender = self.sender()
self.splitcolumnlned = {
'column_name':
self.lnedColumnname.text().strip(),
'split_column_by':
self.lnedSplitcolumnby.text()
}
print('split inputs: ', self.splitcolumnlned)
self.splitcolumnini = ini.InputHandler(
name='splitcolumn',
lnedentry=self.splitcolumnlned
)
self.facade.input_register(self.splitcolumnini)
self.facade.create_log_record('splitcolumn')
self._log = self.facade._tablelog['splitcolumn']
if self.previous_click is True:
self.viewEdit = view.PandasTableModelEdit(None)
else:
pass
try:
self.splitcolumntable = hlp.split_column(
self.facade._data,
self.splitcolumnlned['column_name'],
self.splitcolumnlned['split_column_by']
)
self.previous_click = True
hlp.write_column_to_log(
self.splitcolumnlned, self._log, 'splitcolumn')
except Exception as e:
print(str(e))
self.error.showMessage(
'Could not split column: ' + str(e))
if sender is self.btnPreview:
self.viewEdit.set_data(
self.splitcolumntable)
self.preview.tabviewPreview.setModel(
self.viewEdit)
self.preview.show()
elif sender is self.btnSaveClose:
self.facade._data = self.splitcolumntable
self.update_data.emit('update')
self.close()
| mit |
jflamant/sphericalEMC | dataGeneration/energyShellDistribution.py | 1 | 2130 | # file: energyShellDistribution.py
#
# This code computes the energy among spherical harmonic coefficients for
# successive shells. See for instance Fig. 2 (bottom) in our ArXiv preprint
# http://arxiv.org/abs/1602.01301
#
# Copyright (c) J. Flamant, April 2016.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please visit http://www.gnu.org
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as col
from math import pi
wavelength = 2 # in Angstroms
D = 1e-2 # detector distance, in meters
px = 3.38e-5 # pixel size, in meters
re = 2.8179403267*1e-15 # in m
nphotons = 1e13
beamarea = 1e-14 # in m**2
qmax = 4*pi/wavelength # largest scattering vector
grid_size = 64 # number of pixels in the upper half detector
nside = 128
lmax = 3*nside-1 # if running anafast in iterative mode, otherwise use 2*nside
l_even = np.arange(0, lmax, 2) # only even l, due to Friedel symmetry
shell_index = np.arange(1, grid_size+1, 1)
E = np.zeros((lmax+1, grid_size))
Llim = np.zeros(grid_size)
thresh = 1e-5 # a threshold to obtain the bandlimit
for s in range(1, grid_size+1, 1):
print(s)
    I = np.load('numpy_array_of_intensity_shell_index_s.npy')  # placeholder name; substitute the intensity map for shell s
E[:, s-1] = hp.anafast(I, iter=30)
E[:, s-1] = E[:, s-1]/np.sum(E[:, s-1])
# find Llim_s (Bandlimit on shell s) (may require tuning, check ouput values)
x = np.where(E[l_even, s-1] < thresh)
Llim[s-1] = np.min(x)
plt.imshow(E[l_even, :], norm=col.LogNorm(), cmap='viridis')
plt.colorbar()
plt.show()
| gpl-3.0 |
davidgutierrez/HeartRatePatterns | Python/NMF.py | 1 | 3873 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 19 00:50:49 2017
Utils for calculating the NMF
@author: David Gutierrez
"""
from sklearn.decomposition import NMF
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import sys
import os
sys.path.append(os.path.abspath("/home/scidb/HeartRatePatterns/Python"))
from LogisticRegresion import ajustLogisticRegression
def generateNMF(patients, survived, n_components=30):
"""Generates a NMF and gives a Logistic Regression trained,
the model, the table actor, the roc_auc and accurracy of the
Logistic Regresion.
Keyword arguments:
patients -- matrix with the heartbeats of the patients
survived -- list that indicates thet the row of patient survived
n_components -- number of components of the table
"""
nmf = NMF(n_components=n_components, random_state=0, alpha=.1, l1_ratio=0)
patients_nmf = nmf.fit_transform(patients)
m_train, m_test, l_train, l_test = train_test_split(patients_nmf,
survived,
test_size=0.2,
random_state=42)
result = ajustLogisticRegression(m_train, l_train, m_test, l_test)
predict_poba = result['model'].predict_proba(m_train)[:, 1]
result.update({'patients_test':m_test, 'nmf':nmf,
'patients_nmf':patients_nmf, 'predict_poba':predict_poba,
'survived_test':l_train})
return result
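# Hedged usage sketch with synthetic data (the real project feeds the MIMIC
# heartbeat matrix here); shapes and n_components are arbitrary choices.
def _example_generate_nmf():
    import numpy as np
    rng = np.random.RandomState(0)
    patients = rng.rand(60, 200)               # non-negative matrix, one row per patient
    survived = rng.randint(0, 2, 60).tolist()  # binary outcome per patient
    return generateNMF(patients, survived, n_components=5)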
#from operator import itemgetter
from scipy.stats.stats import pearsonr
def find_pearson(value, patient, survived):
# pearsonList = []
result = -100
for i in range(value):
patientpear = patient[:, i]
pearson = pearsonr(patientpear, survived)
if pearson[0] > result:
result = pearson[0]
# pearsonList.append({'group':i,'p1':pearson[0],'p2':pearson[1]})
# sortedList = sorted(pearsonList, key=itemgetter('p1'), reverse=True)
return result
def plot_pearson(title,pearson):
leng = range(1, len(pearson)+1)
maxperson = max(pearson)
indxperson = pearson.index(maxperson)
plt.subplot(111)
plt.plot(leng, pearson, lw=2)
plt.annotate('maximo ('+str(maxperson)+","+str(indxperson+2)+")",
xy=(indxperson, maxperson),
xytext=(indxperson+5, maxperson-0.02),
arrowprops=dict(facecolor='black', shrink=0.15))
plt.xlim([1, 100])
plt.title(title)
plt.xlabel('Valor de k en NMF')
plt.show()
def plot_error(title, pearson):
leng = range(2, len(pearson)+2)
plt.subplot(111)
plt.plot(leng, pearson, lw=2)
plt.title(title)
plt.xlabel('Valor de k en NMF')
plt.show()
def find_best_NMF(patients, survived):
fig_size = [16, 4]
plt.rcParams["figure.figsize"] = fig_size
result = []
old_err = None
for value in range(2, 100):
print(value,end=",")
diction = generateNMF(patients, survived, n_components=value)
err_new = diction['nmf'].reconstruction_err_
diff_err = None if old_err is None else old_err-err_new
old_err = err_new
# diction.update({'n_components':value})
result.append({'pearson':find_pearson(value, diction['patients_nmf'], survived),
'recostrucción error': err_new,
'diffErr':diff_err,
'accuracy':diction['accuracy'],
'roc_auc':diction['roc_auc']})
plot_pearson('pearson',[d['pearson'] for d in result])
plot_error('recostrucción error',
[d['recostrucción error'] for d in result])
plot_error('diferencia del Error', [d['diffErr'] for d in result])
plot_pearson('Presición', [d['accuracy'] for d in result])
plot_pearson('Area bajo la curva', [d['roc_auc'] for d in result])
| gpl-3.0 |
iismd17/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
alcor-dtu/ml-poster | code/SdADenoisingPlot.py | 1 | 20810 | """
This tutorial introduces stacked denoising auto-encoders (SdA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained such
that to minimize the reconstruction error (the error between x and z).
For the denosing autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
from __future__ import print_function
import os
import sys
import timeit
import numpy
import pickle
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import matplotlib
import matplotlib.pyplot as plt
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from ImageDenoising import dA, loadDatasets, filterImages,saveImage
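# Illustrative sketch (not part of the original module): a tiny NumPy-only
# version of the single denoising-autoencoder step described in the module
# docstring above.  The sizes, corruption level and random weights below are
# made-up demonstration values; the helper is never called by this script.
def _demo_denoising_autoencoder_step(seed=0, n_visible=8, n_hidden=4, corruption=0.3):
    """Corrupt x, encode, decode and return the cross-entropy reconstruction cost."""
    rng = numpy.random.RandomState(seed)
    x = rng.uniform(0.05, 0.95, size=n_visible)             # input x in (0, 1)^d
    W = rng.normal(scale=0.1, size=(n_visible, n_hidden))   # encoder weights (tied: W' = W.T)
    b, b_prime = numpy.zeros(n_hidden), numpy.zeros(n_visible)
    def sigmoid(a):
        return 1.0 / (1.0 + numpy.exp(-a))
    x_tilde = x * rng.binomial(1, 1.0 - corruption, size=n_visible)  # stochastic corruption
    y = sigmoid(numpy.dot(x_tilde, W) + b)                   # hidden representation y
    z = sigmoid(numpy.dot(y, W.T) + b_prime)                 # reconstruction z
    # cross-entropy between the *uncorrupted* x and the reconstruction z
    return -numpy.sum(x * numpy.log(z) + (1.0 - x) * numpy.log(1.0 - z))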
# start-snippet-1
class SdA(object):
"""Stacked denoising auto-encoder class (SdA)
A stacked denoising autoencoder model is obtained by stacking several
dAs. The hidden layer of the dA at layer `i` becomes the input of
the dA at layer `i+1`. The first layer dA gets as input the input of
the SdA, and the hidden layer of the last dA represents the output.
Note that after pretraining, the SdA is dealt with as a normal MLP,
the dAs are only used to initialize the weights.
"""
def __init__(
self,
numpy_rng,
theano_rng=None,
n_ins=784,
hidden_layers_sizes=[500, 500],
n_outs=10,
corruption_levels=[0.1, 0.1]
):
""" This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the sdA
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
:type corruption_levels: list of float
:param corruption_levels: amount of corruption to use for each
layer
"""
# self.sigmoid_layers = []
self.sigmoid_noise_layers = []
self.dA_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x', dtype='float32') # the data is presented as rasterized images
self.noise_x = T.matrix('noise_x', dtype='float32')
self.y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
# end-snippet-1
# The SdA is an MLP, for which all weights of intermediate layers
# are shared with a different denoising autoencoders
# We will first construct the SdA as a deep multilayer perceptron,
# and when constructing each sigmoidal layer we also construct a
# denoising autoencoder that shares weights with that layer
# During pretraining we will train these autoencoders (which will
        # lead to changing the weights of the MLP as well)
        # During fine-tuning we will finish training the SdA by doing
        # stochastic gradient descent on the MLP
# start-snippet-2
for i in range(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden units of
# the layer below or the input size if we are on the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# input_size = n_ins
# the input to this layer is either the activation of the hidden
# layer below or the input of the SdA if you are on the first
# layer
if i == 0:
layer_input = self.x
layer_noise_input = self.noise_x
else:
layer_input = self.x
for ddAA in self.dA_layers:
layer_input = ddAA.get_hidden_values(layer_input)
# theano.printing.debugprint(layer_input)
layer_noise_input = self.sigmoid_noise_layers[-1].output
# theano.printing.debugprint(layer_noise_input)
sigmoid_noise_layer = HiddenLayer(rng=numpy_rng,
input=layer_noise_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_noise_layers.append(sigmoid_noise_layer)
            # it's arguably a philosophical question...
# but we are going to only declare that the parameters of the
# sigmoid_layers are parameters of the StackedDAA
# the visible biases in the dA are parameters of those
# dA, but not the SdA
self.params.extend(sigmoid_noise_layer.params)
# Construct a denoising autoencoder that shared weights with this
# layer
dA_layer = dA(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
noiseInput = layer_noise_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_noise_layer.W,
bhid=sigmoid_noise_layer.b)
self.dA_layers.append(dA_layer)
# end-snippet-2
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_noise_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs
)
self.params.extend(self.logLayer.params)
# construct a function that implements one step of finetunining
# compute the cost for second phase of training,
# defined as the negative log likelihood
self.finetune_cost = self.logLayer.cost_function(self.x)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
# self.errors = self.logLayer.errors(self.y)
def pretraining_functions(self, train_set_x, train_set_x_noise, batch_size):
''' Generates a list of functions, each of them implementing one
        step in training the dA corresponding to the layer with same index.
The function will require as input the minibatch index, and to train
a dA you just need to iterate, calling the corresponding function on
all minibatch indexes.
        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all clean datapoints
                            used for training the dA
        :type train_set_x_noise: theano.tensor.TensorType
        :param train_set_x_noise: Shared variable that contains the corresponding
                                  noisy datapoints
:type batch_size: int
:param batch_size: size of a [mini]batch
:type learning_rate: float
:param learning_rate: learning rate used during training for any of
the dA layers
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
# % of corruption to use
learning_rate = T.scalar('lr') # learning rate to use
        # beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for dA in self.dA_layers:
# get the cost and the updates list
cost, updates = dA.get_cost_updates(learning_rate)
# compile the theano function
#TODO remove corruption
fn = theano.function(
inputs=[
index,
theano.In(learning_rate, value=0.1)
],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin: batch_end],
self.noise_x: train_set_x_noise[batch_begin: batch_end]
}
)
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
def build_finetune_functions(self, train_set_x, train_set_x_noise, batch_size, learning_rate):
        '''Generates a function `train` that implements one step of
        finetuning on the clean/noisy training patches.
        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains the clean
                            training patches
        :type train_set_x_noise: theano.tensor.TensorType
        :param train_set_x_noise: Shared variable that contains the
                                  corresponding noisy training patches
        :type batch_size: int
        :param batch_size: size of a minibatch
        :type learning_rate: float
        :param learning_rate: learning rate used during finetune stage
        '''
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = [
(param, param - gparam * learning_rate)
for param, gparam in zip(self.params, gparams)
]
train_fn = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.noise_x: train_set_x_noise[
index * batch_size: (index + 1) * batch_size
]
},
name='train'
)
return train_fn
def get_denoised_patch_function(self, patch):
x = patch
for dA in self.dA_layers:
x = dA.get_hidden_values(x)
# z = dA.get_reconstructed_input(x)
z = self.logLayer.get_denoised_patch_function(x)
return z
# z = self.dA_layers[-1].get_reconstructed_input(x)
# return x
def filterImagesSdA(noise_datasets, sda):
d = noise_datasets.copy()
rgb = ('r', 'g', 'b')
x = T.vector('x', dtype='float32')
evaluate = theano.function(
[x],
sda.get_denoised_patch_function(x)
)
for c in rgb:
imgs = numpy.array(d[c]['data'], dtype='float32')
for idx in range(0, imgs.shape[0],1):
# print("denoising: " + c + str(idx) )
X = imgs[idx]
Z = evaluate(X)
d[c]['data'][idx] = Z
return d
def unpickle(file):
fo = open(file, 'rb')
d = pickle.load(fo)
fo.close()
return d
def saveTrainedData(path, sda):
d = {}
d["SdA"] = {"data" : sda}
ff = open(path, "wb")
pickle.dump(d, ff, protocol=pickle.HIGHEST_PROTOCOL)
ff.close()
def loadTrainedData(path):
d = unpickle(path)
sda = d["SdA"]["data"]
    results = sda
return results
#TODO change parameters to use our datasets
def test_SdA(finetune_lr=0.1, pretraining_epochs=1000,
pretrain_lr=0.5, training_epochs=1000,
hidden_layers_fraction = [0.5, 0.5, 0.5],
noise_dataset_samples = 5
):
dataset_base = "sponzat_0"
dataset_name = dataset_base + "_10000"
result_folder = "./result_images"
noise_dataset_name = dataset_base +'_'+ str(noise_dataset_samples)
clean_patches_f, noisy_patches_f, clean_datasets, noisy_datasets, patch_size = loadDatasets(dataset_name, noise_dataset_name)
Width = patch_size[0]
Height= patch_size[1]
hidden_layers_sizes = [int(f*Width * Height) for f in hidden_layers_fraction]
batch_size = clean_patches_f.shape[0]//2
layers_string = ""
for idx in xrange(len(hidden_layers_sizes)):
layers_string = layers_string + "_" +str(idx)+ "L" +str(hidden_layers_sizes[idx])
parameters_name = ('_SdA_pretrain' + str(pretraining_epochs)+ '_tuning'+ str(training_epochs)
+ layers_string + '_tunerate' + str(finetune_lr)
+ '_pretrainrate' + str(pretrain_lr)+'_W' +str(Width)
+'_minibatch' + str(batch_size))
path = 'training/trained_variables_' + noise_dataset_name + parameters_name +'.dat'
train_set_x = theano.shared(clean_patches_f)
train_set_x_noise = theano.shared(noisy_patches_f)
isTrained = os.path.isfile(path)
if not isTrained:
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_train_batches //= batch_size
# numpy random generator
# start-snippet-3
numpy_rng = numpy.random.RandomState(1)
print('... building the model')
# construct the stacked denoising autoencoder class
sda = SdA(
numpy_rng=numpy_rng,
n_ins=Width * Height,
hidden_layers_sizes=hidden_layers_sizes,
n_outs=Width * Height
)
# end-snippet-3 start-snippet-4
#########################
# PRETRAINING THE MODEL #
#########################
print('... getting the pretraining functions')
pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
train_set_x_noise = train_set_x_noise,
batch_size=batch_size)
print('... pre-training the model')
start_time = timeit.default_timer()
## Pre-train layer-wise
layers_cost = []
for i in range(sda.n_layers):
# go through pretraining epochs
layer_cost = []
for epoch in range(pretraining_epochs):
# go through the training set
c = []
                if epoch % 100 == 0 and epoch > 0:
                    pretrain_lr = pretrain_lr * 0.5
                if epoch % 100 == 0 and epoch > 0:
                    if numpy.abs(layer_cost[-1] - layer_cost[-2]) < 0.01:
                        pretrain_lr = 2 * pretrain_lr
for batch_index in range(n_train_batches):
c.append(pretraining_fns[i](index=batch_index,lr=pretrain_lr))
if epoch % 1 == 0:
print('Pre-training layer %i, epoch %d, cost %f' % (i, epoch, numpy.mean(c)))
layer_cost.append(numpy.mean(c))
layers_cost.append(layer_cost)
end_time = timeit.default_timer()
print(('The pretraining code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
########################
# FINETUNING THE MODEL #
########################
# get the training, validation and testing function for the model
print('... getting the finetuning functions')
train_fn = sda.build_finetune_functions(
train_set_x = train_set_x,
train_set_x_noise = train_set_x_noise,
batch_size=batch_size,
learning_rate=finetune_lr
)
        print('... finetuning the model')
start_time = timeit.default_timer()
epoch = 0
finetune_costs = []
while (epoch < training_epochs): # and (not done_looping)
epoch = epoch + 1
c = []
for minibatch_index in range(n_train_batches):
c.append(train_fn(minibatch_index))
if epoch % 1 == 0:
print('fine tuning, epoch %d, cost %f' % (epoch, numpy.mean(c)))
finetune_costs.append(numpy.mean(c))
end_time = timeit.default_timer()
print(('The training code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
    else:
        sda = loadTrainedData(path)
        # no training curves are available when a cached model is loaded
        layers_cost, finetune_costs = [], []
    return layers_cost, finetune_costs
# d = filterImagesSdA(noisy_datasets, sda)
# saveTrainedData(path, sda)
# saveImage(d, noise_dataset_name + parameters_name,
# result_folder)
# # end-snippet-4
if __name__ == '__main__':
plt.close('all')
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
pretrain_lrs = [1]
finetune_lrs = [0.01, 0.1, 1]
hidden_layers_fraction = [0.5, 0.5]
figures =[[]]
axes =[[]]
tune_figures = []
tune_axes = []
for fine_l in xrange(0, len(finetune_lrs)):
tune_figures.append(plt.figure())
tune_axes.append(tune_figures[fine_l].add_subplot(111))
        tune_axes[fine_l].set_title('finetuning cost, tune_lr ' + str(finetune_lrs[fine_l]))
tune_axes[fine_l].set_ylabel('Cost')
tune_axes[fine_l].set_xlabel('Epoch')
# for l in xrange(0, len(hidden_layers_fraction)):
# figures.append([])
# figures[fine_l].append(plt.figure())
# axes.append([])
# axes[fine_l].append(figures[fine_l][l].add_subplot(111))
# axes[fine_l][l].set_title('pretraining lr, layer ' + str(l) + ', tune_lr ' + str(finetune_lrs[fine_l]))
# axes[fine_l][l].set_ylabel('Cost')
# axes[fine_l][l].set_xlabel('Epoch')
for fine_l in xrange(0, len(finetune_lrs)):
finetune_lr = finetune_lrs[fine_l]
for lr in pretrain_lrs:
costs, tune_costs = test_SdA(pretrain_lr = lr, hidden_layers_fraction=hidden_layers_fraction,finetune_lr=finetune_lr)
# for idx in xrange(0, len(costs)):
# axes[fine_l][idx].plot(costs[idx], label='lr: '+str(lr))
# axes[fine_l][idx].set_ylim([0,1500])
tune_axes[fine_l].plot(tune_costs, label='tune_lr: '+str(finetune_lr))
tune_axes[fine_l].set_ylim([0,1500])
# for fine_l in xrange(0, len(finetune_lrs)):
#
# for idx in xrange(0, len(axes[fine_l])):
# f = figures[fine_l][idx]
# ax = axes[fine_l][idx]
#
#
# leg = ax.legend(loc='upper left')
for fine_l in xrange(0, len(finetune_lrs)):
f = tune_figures[fine_l]
ax = tune_axes[fine_l]
leg = ax.legend(loc='upper left')
plt.show()
| gpl-3.0 |
teto/home | config/ipython/profile_default/ipython_config.py | 1 | 19767 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
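# For example (illustrative values only, kept commented out like the defaults
# above), common imports could be run at every startup with:
# c.InteractiveShellApp.exec_lines = ['import numpy as np', 'import pandas as pd']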
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.TerminalIPythonApp.profile = 'default'
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = ''
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = ''
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
#
# c.TerminalInteractiveShell.debug = False
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 3.4.3+ (default, Oct 14 2015, 16:03:50) \nType "copyright", "credits" or "license" for more information.\n\nIPython 2.3.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'nvim'
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
#
# c.TerminalInteractiveShell.separate_out = ''
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.history_length = 10000
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
#
# c.TerminalInteractiveShell.readline_use = True
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
#
# c.PromptManager.color_scheme = 'Linux'
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
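# For example (illustrative value, kept commented out), a longer SQLite lock
# timeout could be requested with:
# c.HistoryManager.connection_options = {'timeout': 20}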
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = ''
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#
# c.PlainTextFormatter.deferred_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| gpl-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/tslibs/test_libfrequencies.py | 2 | 2647 | # -*- coding: utf-8 -*-
import pytest
from pandas._libs.tslibs.frequencies import (
INVALID_FREQ_ERR_MSG, _period_str_to_code, get_rule_month, is_subperiod,
is_superperiod)
from pandas.tseries import offsets
@pytest.mark.parametrize("obj,expected", [
("W", "DEC"),
(offsets.Week(), "DEC"),
("D", "DEC"),
(offsets.Day(), "DEC"),
("Q", "DEC"),
(offsets.QuarterEnd(startingMonth=12), "DEC"),
("Q-JAN", "JAN"),
(offsets.QuarterEnd(startingMonth=1), "JAN"),
("A-DEC", "DEC"),
("Y-DEC", "DEC"),
(offsets.YearEnd(), "DEC"),
("A-MAY", "MAY"),
("Y-MAY", "MAY"),
(offsets.YearEnd(month=5), "MAY")
])
def test_get_rule_month(obj, expected):
result = get_rule_month(obj)
assert result == expected
@pytest.mark.parametrize("obj,expected", [
("A", 1000),
("A-DEC", 1000),
("A-JAN", 1001),
("Y", 1000),
("Y-DEC", 1000),
("Y-JAN", 1001),
("Q", 2000),
("Q-DEC", 2000),
("Q-FEB", 2002),
("W", 4000),
("W-SUN", 4000),
("W-FRI", 4005),
("Min", 8000),
("ms", 10000),
("US", 11000),
("NS", 12000)
])
def test_period_str_to_code(obj, expected):
assert _period_str_to_code(obj) == expected
@pytest.mark.parametrize("p1,p2,expected", [
# Input validation.
(offsets.MonthEnd(), None, False),
(offsets.YearEnd(), None, False),
(None, offsets.YearEnd(), False),
(None, offsets.MonthEnd(), False),
(None, None, False),
(offsets.YearEnd(), offsets.MonthEnd(), True),
(offsets.Hour(), offsets.Minute(), True),
(offsets.Second(), offsets.Milli(), True),
(offsets.Milli(), offsets.Micro(), True),
(offsets.Micro(), offsets.Nano(), True)
])
def test_super_sub_symmetry(p1, p2, expected):
assert is_superperiod(p1, p2) is expected
assert is_subperiod(p2, p1) is expected
@pytest.mark.parametrize("freq,expected,aliases", [
("D", 6000, ["DAY", "DLY", "DAILY"]),
("M", 3000, ["MTH", "MONTH", "MONTHLY"]),
("N", 12000, ["NANOSECOND", "NANOSECONDLY"]),
("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"]),
("T", 8000, ["minute", "MINUTE", "MINUTELY"]),
("L", 10000, ["MILLISECOND", "MILLISECONDLY"]),
("U", 11000, ["MICROSECOND", "MICROSECONDLY"]),
("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"]),
("B", 5000, ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY"]),
])
def test_assert_aliases_deprecated(freq, expected, aliases):
assert isinstance(aliases, list)
assert _period_str_to_code(freq) == expected
for alias in aliases:
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
_period_str_to_code(alias)
| bsd-3-clause |
gfyoung/pandas | pandas/io/parsers/base_parser.py | 1 | 36878 | from collections import defaultdict
import csv
import datetime
import itertools
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Union, cast
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
from pandas._typing import DtypeArg, FilePathOrBuffer
from pandas.errors import ParserError, ParserWarning
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
from pandas.core.arrays import Categorical
from pandas.core.indexes.api import Index, MultiIndex, ensure_index_from_sequences
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
from pandas.io.common import IOHandles, get_handle
from pandas.io.date_converters import generic_parser
parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"lineterminator": None,
"header": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_format": False,
"skip_blank_lines": True,
}
class ParserBase:
def __init__(self, kwds):
self.names = kwds.get("names")
self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.get("index_col", None)
self.unnamed_cols: Set = set()
self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.get("na_values")
self.na_fvalues = kwds.get("na_fvalues")
self.na_filter = kwds.get("na_filter", False)
self.keep_default_na = kwds.get("keep_default_na", True)
self.true_values = kwds.get("true_values")
self.false_values = kwds.get("false_values")
self.mangle_dupe_cols = kwds.get("mangle_dupe_cols", True)
self.infer_datetime_format = kwds.pop("infer_datetime_format", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format,
cache_dates=self.cache_dates,
)
# validate header options for mi
self.header = kwds.get("header")
if isinstance(self.header, (list, tuple, np.ndarray)):
if not all(map(is_integer, self.header)):
raise ValueError("header must be integer or list of integers")
if any(i < 0 for i in self.header):
raise ValueError(
"cannot specify multi-index header with negative integers"
)
if kwds.get("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header"
)
if kwds.get("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and all(map(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header"
)
elif self.header is not None:
# GH 27394
if self.prefix is not None:
raise ValueError(
"Argument prefix must be None if argument header is not None"
)
# GH 16338
elif not is_integer(self.header):
raise ValueError("header must be integer or list of integers")
# GH 27779
elif self.header < 0:
raise ValueError(
"Passing negative integer to header is invalid. "
"For no header, use header=None instead"
)
self._name_processed = False
self._first_chunk = True
self.handles: Optional[IOHandles] = None
def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
"""
        Let the readers open IOHandles after they are done with their potential raises.
"""
self.handles = get_handle(
src,
"r",
encoding=kwds.get("encoding", None),
compression=kwds.get("compression", None),
memory_map=kwds.get("memory_map", False),
storage_options=kwds.get("storage_options", None),
)
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the dataframe.
Raises
------
ValueError
If column to parse_date is not in dataframe.
"""
cols_needed: Iterable
if is_dict_like(self.parse_dates):
cols_needed = itertools.chain(*self.parse_dates.values())
elif is_list_like(self.parse_dates):
# a column in parse_dates could be represented
# ColReference = Union[int, str]
# DateGroups = List[ColReference]
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
col if is_list_like(col) else [col] for col in self.parse_dates
)
else:
cols_needed = []
        # get only the columns that are referenced by name (str), not by index
missing_cols = ", ".join(
sorted(
{
col
for col in cols_needed
if isinstance(col, str) and col not in columns
}
)
)
if missing_cols:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
def close(self):
if self.handles is not None:
self.handles.close()
@property
def _has_complex_date_col(self):
return isinstance(self.parse_dates, dict) or (
isinstance(self.parse_dates, list)
and len(self.parse_dates) > 0
and isinstance(self.parse_dates[0], list)
)
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = i if self.index_col is None else self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
def _extract_multi_indexer_columns(
self, header, index_names, col_names, passed_names=False
):
"""
extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers
"""
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, _, _ = self._clean_index_names(
index_names, self.index_col, self.unnamed_cols
)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header)))
names = ic + columns
# If we find unnamed columns all in a single
# level, then our header was too long.
for n in range(len(columns[0])):
if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
header = ",".join(str(x) for x in self.header)
raise ParserError(
f"Passed header=[{header}] are too many rows "
"for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
if len(ic):
col_names = [
r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
for r in header
]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
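        # For example (illustrative): with mangle_dupe_cols=True a header of
        # ["a", "a", "b"] comes back from this method as ["a", "a.1", "b"].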
if self.mangle_dupe_cols:
names = list(names) # so we can index
# pandas\io\parsers.py:1559: error: Need type annotation for
# 'counts' [var-annotated]
counts = defaultdict(int) # type: ignore[var-annotated]
is_potential_mi = _is_potential_multi_index(names, self.index_col)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
if is_potential_mi:
col = col[:-1] + (f"{col[-1]}.{cur_count}",)
else:
col = f"{col}.{cur_count}"
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
if not is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._get_simple_index(alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _, self.index_col) = self._clean_index_names(
list(columns), self.index_col, self.unnamed_cols
)
self._name_processed = True
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = len(indexnamerow) - len(columns)
# pandas\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
# has no attribute "set_names" [union-attr]
index = index.set_names(indexnamerow[:coffset]) # type: ignore[union-attr]
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True) -> Index:
arrays = []
for i, arr in enumerate(index):
if try_parse_dates and self._should_parse_dates(i):
arr = self._date_conv(arr)
if self.na_filter:
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
else:
col_na_values = set()
col_na_fvalues = set()
if isinstance(self.na_values, dict):
# pandas\io\parsers.py:1678: error: Value of type
# "Optional[Any]" is not indexable [index]
col_name = self.index_names[i] # type: ignore[index]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
names = self.index_names
index = ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
)
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = algorithms.isin(values, list(na_values)).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool=False
)
else:
is_ea = is_extension_array_dtype(cast_type)
is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
# skip inference if specified dtype is object
# or casting to an EA
try_num_bool = not (cast_type and is_str_or_ea_dtype)
# general type inference and conversion
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool
)
# type specified in dtype param or cast_type is an EA
if cast_type and (
not is_dtype_equal(cvals, cast_type)
or is_extension_array_dtype(cast_type)
):
if not is_ea and na_count > 0:
try:
if is_bool_dtype(cast_type):
raise ValueError(
f"Bool column has NA values in column {c}"
)
except (AttributeError, TypeError):
# invalid input to is_bool_dtype
pass
cast_type = pandas_dtype(cast_type)
cvals = self._cast_types(cvals, cast_type, c)
result[c] = cvals
if verbose and na_count:
print(f"Filled {na_count} NA values in column {c!s}")
return result
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
        try_num_bool : bool, default True
try to cast values to numeric (first preference) or boolean
Returns
-------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool and is_object_dtype(values.dtype):
# exclude e.g DatetimeIndex here
try:
result = lib.maybe_convert_numeric(values, na_values, False)
except (ValueError, TypeError):
# e.g. encountering datetime string gets ValueError
# TypeError can be raised in floatify
result = values
na_count = parsers.sanitize_objects(result, na_values, False)
else:
na_count = isna(result).sum()
else:
result = values
if values.dtype == np.object_:
na_count = parsers.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = libops.maybe_convert_bool(
np.asarray(values),
true_values=self.true_values,
false_values=self.false_values,
)
return result, na_count
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
known_cats = (
isinstance(cast_type, CategoricalDtype)
and cast_type.categories is not None
)
if not is_object_dtype(values) and not known_cats:
# TODO: this is for consistency with
# c-parser which parses all categories
# as strings
values = astype_nansafe(values, str)
cats = Index(values).unique().dropna()
values = Categorical._from_inferred_categories(
cats, cats.get_indexer(values), cast_type, true_values=self.true_values
)
# use the EA's implementation of casting
elif is_extension_array_dtype(cast_type):
# ensure cast_type is an actual dtype and not a string
cast_type = pandas_dtype(cast_type)
array_type = cast_type.construct_array_type()
try:
if is_bool_dtype(cast_type):
return array_type._from_sequence_of_strings(
values,
dtype=cast_type,
true_values=self.true_values,
false_values=self.false_values,
)
else:
return array_type._from_sequence_of_strings(values, dtype=cast_type)
except NotImplementedError as err:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
"_from_sequence_of_strings in order to be used in parser methods"
) from err
else:
try:
values = astype_nansafe(values, cast_type, copy=True, skipna=True)
except ValueError as err:
raise ValueError(
f"Unable to convert column {column} to type {cast_type}"
) from err
return values
def _do_date_conversions(self, names, data):
# returns data, columns
if self.parse_dates is not None:
data, names = _process_date_conversion(
data,
self._date_conv,
self.parse_dates,
self.index_col,
self.index_names,
names,
keep_date_col=self.keep_date_col,
)
return names, data
def _evaluate_usecols(self, usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
def _validate_usecols_names(self, usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: "
f"{missing}"
)
return usecols
def _validate_usecols_arg(self, usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
def _clean_index_names(self, columns, index_col, unnamed_cols):
if not is_index_col(index_col):
return None, columns, index_col
columns = list(columns)
# In case of no rows and multiindex columns we have to set index_names to
# list of Nones GH#38292
if not columns:
return [None] * len(index_col), columns, index_col
cp_cols = list(columns)
index_names = []
# don't mutate
index_col = list(index_col)
for i, c in enumerate(index_col):
if isinstance(c, str):
index_names.append(c)
for j, name in enumerate(cp_cols):
if name == c:
index_col[i] = j
columns.remove(name)
break
else:
name = cp_cols[c]
columns.remove(name)
index_names.append(name)
# Only clean index names that were placeholders.
for i, name in enumerate(index_names):
if isinstance(name, str) and name in unnamed_cols:
# pandas\io\parsers.py:3445: error: No overload variant of
# "__setitem__" of "list" matches argument types "int", "None"
# [call-overload]
index_names[i] = None # type: ignore[call-overload]
return index_names, columns, index_col
def _get_empty_meta(
self, columns, index_col, index_names, dtype: Optional[DtypeArg] = None
):
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not is_dict_like(dtype):
# if dtype == None, default will be object.
default_dtype = dtype or object
dtype = defaultdict(lambda: default_dtype)
else:
dtype = cast(dict, dtype)
dtype = defaultdict(
lambda: object,
{columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
)
# Even though we have no data, the "index" of the empty DataFrame
# could for example still be an empty MultiIndex. Thus, we need to
# check whether we have any index columns specified, via either:
#
# 1) index_col (column indices)
# 2) index_names (column names)
#
# Both must be non-null to ensure a successful construction. Otherwise,
# we have to create a generic empty Index.
if (index_col is None or index_col is False) or index_names is None:
index = Index([])
else:
data = [Series([], dtype=dtype[name]) for name in index_names]
index = ensure_index_from_sequences(data, names=index_names)
index_col.sort()
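            # Drop the index columns from the regular column list; the ``n - i``
            # offset accounts for positions already removed in earlier iterations.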
for i, n in enumerate(index_col):
columns.pop(n - i)
col_dict = {col_name: Series([], dtype=dtype[col_name]) for col_name in columns}
return index, columns, col_dict
def _make_date_converter(
date_parser=None, dayfirst=False, infer_datetime_format=False, cache_dates=True
):
def converter(*date_cols):
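        # With no user-supplied parser, the date columns are concatenated into
        # single strings and handed to to_datetime; if that fails, fall back to
        # element-wise parsing via try_parse_dates.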
if date_parser is None:
strs = parsing.concat_date_cols(date_cols)
try:
return tools.to_datetime(
ensure_object(strs),
utc=None,
dayfirst=dayfirst,
errors="ignore",
infer_datetime_format=infer_datetime_format,
cache=cache_dates,
).to_numpy()
except ValueError:
return tools.to_datetime(
parsing.try_parse_dates(strs, dayfirst=dayfirst), cache=cache_dates
)
else:
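            # A user-supplied parser is first applied to whole columns; if it
            # yields a scalar or raises, retry row-by-row on the concatenated
            # strings and, as a last resort, fall back to generic_parser.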
try:
result = tools.to_datetime(
date_parser(*date_cols), errors="ignore", cache=cache_dates
)
if isinstance(result, datetime.datetime):
raise Exception("scalar parser")
return result
except Exception:
try:
return tools.to_datetime(
parsing.try_parse_dates(
parsing.concat_date_cols(date_cols),
parser=date_parser,
dayfirst=dayfirst,
),
errors="ignore",
)
except Exception:
return generic_parser(date_parser, *date_cols)
return converter
def _process_date_conversion(
data_dict,
converter,
parse_spec,
index_col,
index_names,
columns,
keep_date_col=False,
):
def _isindex(colspec):
return (isinstance(index_col, list) and colspec in index_col) or (
isinstance(index_names, list) and colspec in index_names
)
new_cols = []
new_data = {}
orig_names = columns
columns = list(columns)
date_cols = set()
if parse_spec is None or isinstance(parse_spec, bool):
return data_dict, columns
if isinstance(parse_spec, list):
# list of column lists
for colspec in parse_spec:
if is_scalar(colspec):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = orig_names[colspec]
if _isindex(colspec):
continue
data_dict[colspec] = converter(data_dict[colspec])
else:
new_name, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names
)
if new_name in data_dict:
raise ValueError(f"New date column already in dict {new_name}")
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
elif isinstance(parse_spec, dict):
# dict of new name to column list
for new_name, colspec in parse_spec.items():
if new_name in data_dict:
raise ValueError(f"Date column {new_name} already in dict")
_, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names
)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
data_dict.update(new_data)
new_cols.extend(columns)
if not keep_date_col:
for c in list(date_cols):
data_dict.pop(c)
new_cols.remove(c)
return data_dict, new_cols
def _try_convert_dates(parser, colspec, data_dict, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int) and c not in columns:
colnames.append(columns[c])
else:
colnames.append(c)
new_name = "_".join(str(x) for x in colnames)
to_parse = [data_dict[c] for c in colnames if c in data_dict]
new_col = parser(*to_parse)
return new_name, new_col, colnames
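# Illustrative sketch (not part of pandas): shows how _try_convert_dates joins
# the selected column names with "_" and hands the matching columns to the
# parser. The column names, values and stand-in parser below are hypothetical.
def _example_try_convert_dates():
    def parser(*cols):
        # Trivial stand-in that stitches the date pieces back together.
        return ["-".join(parts) for parts in zip(*cols)]
    data = {"year": ["2021"], "month": ["03"], "day": ["15"]}
    new_name, new_col, old_names = _try_convert_dates(
        parser, ["year", "month", "day"], data, list(data)
    )
    assert new_name == "year_month_day"
    assert new_col == ["2021-03-15"]
    assert old_names == ["year", "month", "day"]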
def _get_na_values(col, na_values, na_fvalues, keep_default_na):
"""
Get the NaN values for a given column.
Parameters
----------
col : str
The name of the column.
na_values : array-like, dict
The object listing the NaN values as strings.
na_fvalues : array-like, dict
The object listing the NaN values as floats.
keep_default_na : bool
If `na_values` is a dict, and the column is not mapped in the
dictionary, whether to return the default NaN values or the empty set.
Returns
-------
nan_tuple : A length-two tuple composed of
1) na_values : the string NaN values for that column.
2) na_fvalues : the float NaN values for that column.
"""
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
if keep_default_na:
return STR_NA_VALUES, set()
return set(), set()
else:
return na_values, na_fvalues
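# Illustrative sketch (not part of pandas): per-column NA resolution performed
# by _get_na_values above. The column names and NA strings are hypothetical;
# STR_NA_VALUES is the module-level default set of string NA markers.
def _example_get_na_values():
    na_values = {"age": {"n/a"}}
    na_fvalues = {"age": set()}
    # A column listed in the dict uses its own NA strings.
    assert _get_na_values("age", na_values, na_fvalues, True) == ({"n/a"}, set())
    # A column missing from the dict falls back to the defaults when
    # keep_default_na is True, and to the empty set otherwise.
    assert _get_na_values("name", na_values, na_fvalues, True) == (STR_NA_VALUES, set())
    assert _get_na_values("name", na_values, na_fvalues, False) == (set(), set())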
# Seems to be unused
def _get_col_names(colspec, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int):
colnames.append(columns[c])
return colnames
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or isinstance(index_col, bool):
index_col = []
return (
len(columns)
and not isinstance(columns, MultiIndex)
and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
)
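# Illustrative sketch (not part of pandas): only an all-tuple collection of
# labels is treated as a potential MultiIndex; entries in index_col are
# compared against the labels themselves and skipped. Labels are hypothetical.
def _example_is_potential_multi_index():
    assert _is_potential_multi_index([("a", "b"), ("a", "c")])
    assert not _is_potential_multi_index(["a", ("a", "c")])
    # A plain label that is listed as an index column does not disqualify
    # the remaining tuples.
    assert _is_potential_multi_index(["id", ("a", "b")], index_col=["id"])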
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a TypeError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
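# Illustrative sketch (not part of pandas): booleans, lists and dicts pass
# through _validate_parse_dates_arg unchanged, while other scalars are rejected.
def _example_validate_parse_dates_arg():
    assert _validate_parse_dates_arg(True) is True
    assert _validate_parse_dates_arg([0, 1]) == [0, 1]
    try:
        _validate_parse_dates_arg("date")
    except TypeError:
        pass  # non-boolean scalars raise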
def is_index_col(col):
return col is not None and col is not False
| bsd-3-clause |
OshynSong/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than that of FA in this case. However, PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
ElDeveloper/qiita | qiita_db/metadata_template/base_metadata_template.py | 2 | 63901 | r"""
Metadata template objects (:mod: `qiita_db.metadata_template)
=============================================================
..currentmodule:: qiita_db.metadata_template
This module provides the MetadataTemplate base class and the subclasses
SampleTemplate and PrepTemplate.
Classes
-------
..autosummary::
:toctree: generated/
BaseSample
Sample
PrepSample
MetadataTemplate
SampleTemplate
PrepTemplate
Methods
-------
..autosummary::
:toctree: generated/
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from itertools import chain
from copy import deepcopy
from datetime import datetime
from json import loads, dumps
import pandas as pd
import numpy as np
from skbio.util import find_duplicates
import warnings
from qiita_core.exceptions import IncompetentQiitaDeveloperError
import qiita_db as qdb
from string import ascii_letters, digits
# this is the name of the sample where we store all columns for a sample/prep
# information
QIITA_COLUMN_NAME = 'qiita_sample_column_names'
class BaseSample(qdb.base.QiitaObject):
r"""Sample object that accesses the db to get the information of a sample
belonging to a PrepTemplate or a SampleTemplate.
Parameters
----------
sample_id : str
The sample id
md_template : MetadataTemplate
The metadata template obj to which the sample belongs to
Methods
-------
__eq__
__len__
__getitem__
__setitem__
__delitem__
__iter__
__contains__
exists
keys
values
items
get
See Also
--------
QiitaObject
Sample
PrepSample
"""
# Used to find the right SQL tables - should be defined on the subclasses
_table_prefix = None
_id_column = None
def _check_template_class(self, md_template):
r"""Checks that md_template is of the correct type
Parameters
----------
md_template : MetadataTemplate
The metadata template
Raises
------
IncompetentQiitaDeveloperError
            If it's called directly from the Base class
If `md_template` doesn't have the correct type
"""
raise IncompetentQiitaDeveloperError()
def __init__(self, sample_id, md_template):
r"""Initializes the object
Parameters
----------
sample_id : str
The sample id
md_template : MetadataTemplate
The metadata template in which the sample is present
Raises
------
QiitaDBUnknownIDError
If `sample_id` does not correspond to any sample in md_template
"""
# Check that we are not instantiating the base class
self._check_subclass()
# Check that the md_template is of the correct type
self._check_template_class(md_template)
# Check if the sample id is present on the passed metadata template
# This test will check that the sample id is actually present on the db
if sample_id not in md_template:
raise qdb.exceptions.QiitaDBUnknownIDError(
sample_id, self.__class__.__name__)
# Assign private attributes
self._id = sample_id
self._md_template = md_template
self._dynamic_table = "%s%d" % (self._table_prefix,
self._md_template.id)
def __hash__(self):
r"""Defines the hash function so samples are hashable"""
return hash(self._id)
def __eq__(self, other):
r"""Self and other are equal based on type and ids"""
if not isinstance(other, type(self)):
return False
if other._id != self._id:
return False
if other._md_template != self._md_template:
return False
return True
@classmethod
def exists(cls, sample_id, md_template):
        r"""Checks if a sample with `sample_id` already exists in `md_template`
Parameters
----------
sample_id : str
The sample id
md_template : MetadataTemplate
The metadata template to which the sample belongs to
Returns
-------
bool
True if already exists. False otherwise.
"""
with qdb.sql_connection.TRN:
cls._check_subclass()
sql = """SELECT EXISTS(
SELECT * FROM qiita.{0}
WHERE sample_id=%s AND {1}=%s
)""".format(cls._table, cls._id_column)
qdb.sql_connection.TRN.add(sql, [sample_id, md_template.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def _get_categories(self):
r"""Returns all the available metadata categories for the sample
Returns
-------
set of str
The set of all available metadata categories
"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_values->>'columns'
FROM qiita.{0}
WHERE sample_id = '{1}'""".format(
self._dynamic_table, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql)
results = qdb.sql_connection.TRN.execute_fetchflatten()
if results:
results = loads(results[0])
return set(results)
def _to_dict(self):
r"""Returns the categories and their values in a dictionary
Returns
-------
dict of {str: str}
A dictionary of the form {category: value}
"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_values
FROM qiita.{0}
WHERE sample_id=%s""".format(self._dynamic_table)
qdb.sql_connection.TRN.add(sql, [self._id])
result = qdb.sql_connection.TRN.execute_fetchindex()
return result[0]['sample_values']
def __len__(self):
r"""Returns the number of metadata categories
Returns
-------
int
The number of metadata categories
"""
# return the number of columns
return len(self._get_categories())
def __getitem__(self, key):
r"""Returns the value of the metadata category `key`
Parameters
----------
key : str
The metadata category
Returns
-------
obj
The value of the metadata category `key`
Raises
------
KeyError
            If the metadata category `key` does not exist
See Also
--------
get
"""
with qdb.sql_connection.TRN:
key = key.lower()
if key not in self._get_categories():
# The key is not available for the sample, so raise a KeyError
                raise KeyError(
                    "Metadata category %s does not exist for sample %s"
" in template %d" % (key, self._id, self._md_template.id))
sql = """SELECT sample_values->>'{0}' as {0}
FROM qiita.{1}
WHERE sample_id = %s""".format(
key, self._dynamic_table)
qdb.sql_connection.TRN.add(sql, [self._id])
return qdb.sql_connection.TRN.execute_fetchlast()
def setitem(self, column, value):
"""Sets `value` as value for the given `column`
Parameters
----------
column : str
The column to update
value : str
The value to set. This is expected to be a str on the assumption
that psycopg2 will cast as necessary when updating.
Raises
------
QiitaDBColumnError
If the column does not exist in the table
"""
        # Check if the column exists in the table
if column not in self._get_categories():
raise qdb.exceptions.QiitaDBColumnError(
"Column %s does not exist in %s" %
(column, self._dynamic_table))
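        # jsonb's || operator merges the supplied {column: value} pair into the
        # stored document, overwriting the key if it already exists.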
sql = """UPDATE qiita.{0}
SET sample_values = sample_values || %s
WHERE sample_id = %s""".format(self._dynamic_table)
qdb.sql_connection.perform_as_transaction(
sql, [dumps({column: value}), self.id])
def __setitem__(self, column, value):
r"""Sets the metadata value for the category `column`
Parameters
----------
column : str
The column to update
value : str
The value to set. This is expected to be a str on the assumption
that psycopg2 will cast as necessary when updating.
"""
with qdb.sql_connection.TRN:
self.setitem(column, value)
qdb.sql_connection.TRN.execute()
def __delitem__(self, key):
r"""Removes the sample with sample id `key` from the database
Parameters
----------
key : str
The sample id
"""
raise qdb.exceptions.QiitaDBNotImplementedError()
def __iter__(self):
r"""Iterator over the metadata keys
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
keys
"""
return iter(self._get_categories())
def __contains__(self, key):
r"""Checks if the metadata category `key` is present
Parameters
----------
key : str
The sample id
Returns
-------
bool
True if the metadata category `key` is present, false otherwise
"""
return key.lower() in self._get_categories()
def keys(self):
r"""Iterator over the metadata categories
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
__iter__
"""
return self.__iter__()
def values(self):
r"""Iterator over the metadata values, in metadata category order
Returns
-------
Iterator
Iterator over metadata values
"""
d = self._to_dict()
return d.values()
def items(self):
r"""Iterator over (category, value) tuples
Returns
-------
Iterator
Iterator over (category, value) tuples
"""
d = self._to_dict()
return d.items()
def get(self, key):
r"""Returns the metadata value for category `key`, or None if the
category `key` is not present
Parameters
----------
key : str
The metadata category
Returns
-------
Obj or None
The value object for the category `key`, or None if it is not
present
See Also
--------
__getitem__
"""
try:
return self[key]
except KeyError:
return None
class MetadataTemplate(qdb.base.QiitaObject):
r"""Metadata map object that accesses the db to get the sample/prep
template information
Attributes
----------
id
Methods
-------
exists
__len__
__getitem__
__setitem__
__delitem__
__iter__
__contains__
keys
values
items
get
to_file
add_filepath
update
metadata_headers
delete_column
See Also
--------
QiitaObject
SampleTemplate
PrepTemplate
"""
# Used to find the right SQL tables - should be defined on the subclasses
_table_prefix = None
_id_column = None
_sample_cls = None
# forbidden_words not defined for base class. Please redefine for
# sub-classes.
_forbidden_words = {}
def _check_id(self, id_):
r"""Checks that the MetadataTemplate id_ exists on the database"""
with qdb.sql_connection.TRN:
sql = "SELECT EXISTS(SELECT * FROM qiita.{0} WHERE {1}=%s)".format(
self._table, self._id_column)
qdb.sql_connection.TRN.add(sql, [id_])
return qdb.sql_connection.TRN.execute_fetchlast()
@classmethod
def _table_name(cls, obj_id):
r"""Returns the dynamic table name
Parameters
----------
obj_id : int
The id of the metadata template
Returns
-------
str
The table name
Raises
------
IncompetentQiitaDeveloperError
If called from the base class directly
"""
if not cls._table_prefix:
raise IncompetentQiitaDeveloperError(
"_table_prefix should be defined in the subclasses")
return "%s%d" % (cls._table_prefix, obj_id)
@classmethod
def _clean_validate_template(cls, md_template, study_id,
current_columns=None):
"""Takes care of all validation and cleaning of metadata templates
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
study_id : int
The study to which the metadata template belongs to.
current_columns : iterable of str, optional
The current list of metadata columns
Returns
-------
md_template : DataFrame
Cleaned deep-copy of the input md_template:
Removes 'qiita_study_id' and 'qiita_prep_id' columns,
if present.
Raises
------
QiitaDBColumnError
If the column names in md_template contains invalid characters,
forbidden words, or PostgreSQL-reserved words.
QiitaDBWarning
If there are missing columns required for some functionality
"""
cls._check_subclass()
invalid_ids = qdb.metadata_template.util.get_invalid_sample_names(
md_template.index)
if invalid_ids:
raise qdb.exceptions.QiitaDBColumnError(
"The following sample names in the template contain invalid "
"characters (only alphanumeric characters or periods are "
"allowed): %s." % ", ".join(invalid_ids))
if len(set(md_template.index)) != len(md_template.index):
raise qdb.exceptions.QiitaDBDuplicateSamplesError(
find_duplicates(md_template.index))
# We are going to modify the md_template. We create a copy so
# we don't modify the user one
md_template = md_template.copy(deep=True)
# In the database, all the column headers are lowercase
md_template.columns = [c.lower() for c in md_template.columns]
# drop these columns in the result
if 'qiita_study_id' in md_template.columns:
del md_template['qiita_study_id']
if 'qiita_prep_id' in md_template.columns:
del md_template['qiita_prep_id']
# validating pgsql reserved words not to be column headers
current_headers = set(md_template.columns.values)
# testing for specific column names that are not included in the other
# tests.
pgsql_reserved = cls._identify_pgsql_reserved_words_in_column_names(
current_headers)
invalid = cls._identify_column_names_with_invalid_characters(
current_headers)
forbidden = cls._identify_forbidden_words_in_column_names(
current_headers)
qiime2_reserved = cls._identify_qiime2_reserved_words_in_column_names(
current_headers)
error = []
if pgsql_reserved:
error.append(
"The following column names in the template contain PgSQL "
"reserved words: %s." % ", ".join(pgsql_reserved))
if invalid:
error.append(
"The following column names in the template contain invalid "
"chars: %s." % ", ".join(invalid))
if forbidden:
error.append(
"The following column names in the template contain invalid "
"values: %s." % ", ".join(forbidden))
if qiime2_reserved:
            error.append(
                "The following column names in the template contain QIIME2 "
                "reserved words: %s." % ", ".join(qiime2_reserved))
if error:
raise qdb.exceptions.QiitaDBColumnError(
"%s\nYou need to modify them." % '\n'.join(error))
# Prefix the sample names with the study_id
qdb.metadata_template.util.prefix_sample_names_with_id(md_template,
study_id)
# Check that we don't have duplicate columns
if len(set(md_template.columns)) != len(md_template.columns):
raise qdb.exceptions.QiitaDBDuplicateHeaderError(
find_duplicates(md_template.columns))
return md_template
@classmethod
def _common_creation_steps(cls, md_template, obj_id):
r"""Executes the common creation steps
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
obj_id : int
The id of the object being created
"""
with qdb.sql_connection.TRN:
cls._check_subclass()
# Get some useful information from the metadata template
sample_ids = md_template.index.tolist()
headers = sorted(md_template.keys().tolist())
if not headers:
raise ValueError("Your info file only has sample_name")
# Insert values on template_sample table
values = [[obj_id, s_id] for s_id in sample_ids]
sql = """INSERT INTO qiita.{0} ({1}, sample_id)
VALUES (%s, %s)""".format(cls._table, cls._id_column)
qdb.sql_connection.TRN.add(sql, values, many=True)
# Create table with custom columns
table_name = cls._table_name(obj_id)
sql = """CREATE TABLE qiita.{0} (
sample_id VARCHAR NOT NULL PRIMARY KEY,
sample_values JSONB NOT NULL)""".format(table_name)
qdb.sql_connection.TRN.add(sql)
values = dumps({"columns": md_template.columns.tolist()})
sql = """INSERT INTO qiita.{0} (sample_id, sample_values)
VALUES ('{1}', %s)""".format(
table_name, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql, [values])
values = [(k, df.to_json()) for k, df in md_template.iterrows()]
sql = """INSERT INTO qiita.{0} (sample_id, sample_values)
VALUES (%s, %s)""".format(table_name)
qdb.sql_connection.TRN.add(sql, values, many=True)
# Execute all the steps
qdb.sql_connection.TRN.execute()
@classmethod
def metadata_headers(cls):
"""Returns metadata headers available
Returns
-------
list
Alphabetical list of all metadata headers available
"""
with qdb.sql_connection.TRN:
sql = """SELECT DISTINCT table_name
FROM information_schema.columns
WHERE table_name LIKE '{0}%' AND
table_name != 'sample_template_filepath' AND
table_name != 'prep_template_filepath' AND
table_name != 'prep_template_sample' AND
table_name != 'prep_template_processing_job' AND
table_name != 'preparation_artifact' AND
table_name != 'prep_template'""".format(
cls._table_prefix)
qdb.sql_connection.TRN.add(sql)
tables = qdb.sql_connection.TRN.execute_fetchflatten()
sql = """SELECT sample_values->>'columns'
FROM qiita.%s WHERE sample_id = '{0}'""".format(
QIITA_COLUMN_NAME)
results = []
for t in tables:
qdb.sql_connection.TRN.add(sql % t)
vals = qdb.sql_connection.TRN.execute_fetchflatten()
if vals:
results.extend(loads(vals[0]))
return list(set(results))
def _common_delete_sample_steps(self, sample_names):
r"""Executes the common delete sample steps
Parameters
----------
sample_names : list of str
The sample names to be erased
Raises
------
QiitaDBUnknownIDError
If any of the `sample_names` don't exist
"""
keys = list(self.keys())
missing = [sn for sn in sample_names if sn not in keys]
if missing:
raise qdb.exceptions.QiitaDBUnknownIDError(
', '.join(missing), self._id)
with qdb.sql_connection.TRN:
# to simplify the sql strings, we are creating a base_sql, which
# will be used to create sql1 and sql2. sql1 will delete the
# sample_names from the main table ([sample | prep]_[id]), then
# sql2 will delete the sample_names from [study | prep]_sample
base_sql = 'DELETE FROM qiita.{0} WHERE sample_id=%s'
sql1 = base_sql.format(self._table_name(self._id))
sql2 = '{0} AND {1}=%s'.format(
base_sql.format(self._table), self._id_column)
for sn in sample_names:
qdb.sql_connection.TRN.add(sql1, [sn])
qdb.sql_connection.TRN.add(sql2, [sn, self.id])
qdb.sql_connection.TRN.execute()
# making sure we don't delete all the samples
qdb.sql_connection.TRN.add(
"SELECT COUNT(*) FROM qiita.{0}".format(
self._table_name(self._id)))
            # 1 as the JSON formatted tables have an extra "sample" where we
# store the column information
if qdb.sql_connection.TRN.execute_fetchlast() <= 1:
raise ValueError(
'You cannot delete all samples from an information file')
self.generate_files(samples=sample_names)
def delete_column(self, column_name):
"""Delete `column_name` from info file
Parameters
----------
        column_name : str
The column name to be deleted
Raises
------
QiitaDBColumnError
If the `column_name` doesn't exist
QiitaDBOperationNotPermittedError
            If the info file can't be updated
If the column_name is selected as a specimen_id_column in the
study.
"""
if column_name not in self.categories():
raise qdb.exceptions.QiitaDBColumnError(
"'%s' not in info file %d" % (column_name, self._id))
if not self.can_be_updated(columns={column_name}):
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
'%s cannot be deleted' % column_name)
# if a tube identifier column is selected disallow its deletion
specimen_id_column = qdb.study.Study(self.study_id).specimen_id_column
if specimen_id_column == column_name:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
'"%s" cannot be deleted, this column is currently selected'
' as the tube identifier (specimen_id_column)' %
column_name)
with qdb.sql_connection.TRN:
table_name = 'qiita.{0}{1}'.format(self._table_prefix, self._id)
# deleting from all samples; note that (-) in pgsql jsonb means
# delete that key and value
sql = """UPDATE {0}
SET sample_values = sample_values - %s
WHERE sample_id != %s""".format(table_name)
qdb.sql_connection.TRN.add(sql, [column_name, QIITA_COLUMN_NAME])
# deleting from QIITA_COLUMN_NAME
columns = self.categories()
columns.remove(column_name)
values = '{"columns": %s}' % dumps(columns)
sql = """UPDATE {0}
SET sample_values = %s
WHERE sample_id = '{1}'""".format(
table_name, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql, [values])
qdb.sql_connection.TRN.execute()
self.generate_files()
def can_be_extended(self, new_samples, new_cols):
"""Whether the template can be updated or not
Parameters
----------
new_samples : list of str
The new samples to be added
new_cols : list of str
The new columns to be added
Returns
-------
bool
Whether the template can be extended or not
str
The error message in case that it can't be extended
Raises
------
QiitaDBNotImplementedError
This method should be implemented in the subclasses
"""
raise qdb.exceptions.QiitaDBNotImplementedError(
"The method 'can_be_extended' should be implemented in "
"the subclasses")
def can_be_updated(self, **kwargs):
"""Whether the template can be updated or not
Returns
-------
bool
Whether the template can be updated or not
Raises
------
QiitaDBNotImplementedError
This method should be implemented in the subclasses
"""
raise qdb.exceptions.QiitaDBNotImplementedError(
"The method 'can_be_updated' should be implemented in "
"the subclasses")
def _common_extend_steps(self, md_template):
r"""executes the common extend steps
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
Returns
-------
list of str
The new samples being added
list of str
The new columns being added
"""
with qdb.sql_connection.TRN:
# Check if we are adding new samples
sample_ids = md_template.index.tolist()
curr_samples = set(self.keys())
existing_samples = curr_samples.intersection(sample_ids)
new_samples = set(sample_ids).difference(existing_samples)
# Check if we are adding new columns
headers = md_template.keys().tolist()
new_cols = set(headers).difference(self.categories())
if not new_cols and not new_samples:
return None, None
is_extendable, error_msg = self.can_be_extended(new_samples,
new_cols)
if not is_extendable:
raise qdb.exceptions.QiitaDBError(error_msg)
table_name = self._table_name(self._id)
if new_cols:
warnings.warn(
"The following columns have been added to the existing"
" template: %s" % ", ".join(sorted(new_cols)),
qdb.exceptions.QiitaDBWarning)
# If we are adding new columns, add them first (simplifies
# code). Sorting the new columns to enforce an order
new_cols = sorted(new_cols)
cols = self.categories()
cols.extend(new_cols)
values = dumps({"columns": cols})
sql = """UPDATE qiita.{0}
SET sample_values = %s
WHERE sample_id = '{1}'""".format(
table_name, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql, [values])
if existing_samples:
# The values for the new columns are the only ones that get
# added to the database. None of the existing values will
# be modified (see update for that functionality). Remember
                # that || is the jsonb operator to update or add a new key/value
md_filtered = md_template[new_cols].loc[existing_samples]
for sid, df in md_filtered.iterrows():
values = dict(df.items())
sql = """UPDATE qiita.{0}
SET sample_values = sample_values || %s
WHERE sample_id = %s""".format(
self._table_name(self._id))
qdb.sql_connection.TRN.add(sql, [dumps(values), sid])
if new_samples:
warnings.warn(
"The following samples have been added to the existing"
" template: %s" % ", ".join(new_samples),
qdb.exceptions.QiitaDBWarning)
new_samples = sorted(new_samples)
# At this point we only want the information
# from the new samples
md_filtered = md_template.loc[new_samples]
# Insert new samples to the study sample table
values = [[self._id, s_id] for s_id in new_samples]
sql = """INSERT INTO qiita.{0} ({1}, sample_id)
VALUES (%s, %s)""".format(self._table,
self._id_column)
qdb.sql_connection.TRN.add(sql, values, many=True)
# inserting new samples to the info file
values = [(k, row.to_json())
for k, row in md_filtered.iterrows()]
sql = """INSERT INTO qiita.{0} (sample_id, sample_values)
VALUES (%s, %s)""".format(table_name)
qdb.sql_connection.TRN.add(sql, values, many=True)
# Execute all the steps
qdb.sql_connection.TRN.execute()
return new_samples, new_cols
@classmethod
def exists(cls, obj_id):
        r"""Checks if a MetadataTemplate already exists for the provided object
Parameters
----------
obj_id : int
The id to test if it exists on the database
Returns
-------
bool
True if already exists. False otherwise.
"""
cls._check_subclass()
return qdb.util.exists_table(cls._table_name(obj_id))
def _get_sample_ids(self):
r"""Returns all the available samples for the metadata template
Returns
-------
set of str
The set of all available sample ids
"""
with qdb.sql_connection.TRN:
sql = "SELECT sample_id FROM qiita.{0} WHERE {1}=%s".format(
self._table, self._id_column)
qdb.sql_connection.TRN.add(sql, [self._id])
return set(qdb.sql_connection.TRN.execute_fetchflatten())
def __len__(self):
r"""Returns the number of samples in the metadata template
Returns
-------
int
The number of samples in the metadata template
"""
return len(self._get_sample_ids())
def __getitem__(self, key):
r"""Returns the metadata values for sample id `key`
Parameters
----------
key : str
The sample id
Returns
-------
Sample
The sample object for the sample id `key`
Raises
------
KeyError
If the sample id `key` is not present in the metadata template
See Also
--------
get
"""
with qdb.sql_connection.TRN:
if key in self:
return self._sample_cls(key, self)
else:
                raise KeyError("Sample id %s does not exist in template %d"
% (key, self._id))
def __setitem__(self, key, value):
r"""Sets the metadata values for sample id `key`
Parameters
----------
key : str
The sample id
value : Sample
The sample obj holding the new sample values
"""
raise qdb.exceptions.QiitaDBNotImplementedError()
def __delitem__(self, key):
r"""Removes the sample with sample id `key` from the database
Parameters
----------
key : str
The sample id
"""
raise qdb.exceptions.QiitaDBNotImplementedError()
def __iter__(self):
r"""Iterator over the sample ids
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
keys
"""
return iter(self._get_sample_ids())
def __contains__(self, key):
r"""Checks if the sample id `key` is present in the metadata template
Parameters
----------
key : str
The sample id
Returns
-------
bool
True if the sample id `key` is in the metadata template, false
otherwise
"""
return key in self._get_sample_ids()
def keys(self):
r"""Iterator over the sorted sample ids
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
__iter__
"""
return self.__iter__()
def values(self):
r"""Iterator over the metadata values
Returns
-------
Iterator
Iterator over Sample obj
"""
with qdb.sql_connection.TRN:
return iter(self._sample_cls(sample_id, self)
for sample_id in self._get_sample_ids())
def items(self):
r"""Iterator over (sample_id, values) tuples, in sample id order
Returns
-------
Iterator
Iterator over (sample_ids, values) tuples
"""
with qdb.sql_connection.TRN:
return iter((sample_id, self._sample_cls(sample_id, self))
for sample_id in self._get_sample_ids())
def get(self, key):
r"""Returns the metadata values for sample id `key`, or None if the
sample id `key` is not present in the metadata map
Parameters
----------
key : str
The sample id
Returns
-------
Sample or None
The sample object for the sample id `key`, or None if it is not
present
See Also
--------
__getitem__
"""
try:
return self[key]
except KeyError:
return None
def _transform_to_dict(self, values):
r"""Transforms `values` to a dict keyed by sample id
Parameters
----------
values : object
The object returned from a execute_fetchall call
Returns
-------
dict
"""
result = {}
for row in values:
# Transform the row to a dictionary
values_dict = dict(row)
# Get the sample id of this row
sid = values_dict['sample_id']
del values_dict['sample_id']
# Remove _id_column from this row (if present)
if self._id_column in values_dict:
del values_dict[self._id_column]
result[sid] = values_dict
return result
def generate_files(self):
r"""Generates all the files that contain data from this template
Raises
------
QiitaDBNotImplementedError
This method should be implemented by the subclasses
"""
raise qdb.exceptions.QiitaDBNotImplementedError(
"generate_files should be implemented in the subclass!")
def to_file(self, fp, samples=None):
r"""Writes the MetadataTemplate to the file `fp` in tab-delimited
format
Parameters
----------
fp : str
Path to the output file
samples : set, optional
If supplied, only the specified samples will be written to the
file
"""
with qdb.sql_connection.TRN:
df = self.to_dataframe()
if samples is not None:
df = df.loc[samples]
# Sorting the dataframe so multiple serializations of the metadata
# template are consistent.
df.sort_index(axis=0, inplace=True)
df.sort_index(axis=1, inplace=True)
# Store the template in a file
df.to_csv(fp, index_label='sample_name', na_rep="", sep='\t',
encoding='utf-8')
def _common_to_dataframe_steps(self):
"""Perform the common to_dataframe steps
Returns
-------
pandas DataFrame
            The metadata in the template, indexed on sample id
"""
with qdb.sql_connection.TRN:
# Retrieve all the information from the database
cols = self.categories()
sql = """SELECT sample_id, sample_values
FROM qiita.{0}
WHERE sample_id != '{1}'""".format(
self._table_name(self._id), QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql)
# this query is going to return a tuple
# (sample_id, dict of columns/values); however it's important to
# notice that we can't assure that all column/values pairs are the
# same for all samples as we are not doing full bookkeeping of all
# the columns in all the samples. Thus, we have 2 options:
# 1. use dict() on the query result with pd.DataFrame.from_dict so
# pandas deals with this; but this takes a crazy amount of time,
# for more info google: "performance pandas from_dict"
# 2. generate a matrix rows/samples, cols/values and load them
            # via pandas.DataFrame, which actually has good performance
data = []
for sid, values in qdb.sql_connection.TRN.execute_fetchindex():
# creating row of values, first insert sample id
vals = [sid]
# then loop over all the possible values making sure that if
# the column doesn't exist in that sample, it gets a None
for c in cols:
v = None
if c in values:
v = values[c]
vals.append(v)
# append the row to the full matrix
data.append(vals)
cols.insert(0, 'sample_id')
df = pd.DataFrame(data, columns=cols, dtype=str)
df.set_index('sample_id', inplace=True)
            # Make sure that we are replacing np.NaN with None
            df = df.where(pd.notnull(df), None)
id_column_name = 'qiita_%sid' % (self._table_prefix)
if id_column_name == 'qiita_sample_id':
id_column_name = 'qiita_study_id'
df[id_column_name] = str(self.id)
return df
def add_filepath(self, filepath, fp_id=None):
r"""Populates the DB tables for storing the filepath and connects the
`self` objects with this filepath"""
with qdb.sql_connection.TRN:
fp_id = self._fp_id if fp_id is None else fp_id
try:
fpp_id = qdb.util.insert_filepaths(
[(filepath, fp_id)], None, "templates",
move_files=False)[0]
sql = """INSERT INTO qiita.{0} ({1}, filepath_id)
VALUES (%s, %s)""".format(self._filepath_table,
self._id_column)
qdb.sql_connection.TRN.add(sql, [self._id, fpp_id])
qdb.sql_connection.TRN.execute()
except Exception as e:
qdb.logger.LogEntry.create(
'Runtime', str(e), info={self.__class__.__name__: self.id})
raise e
def get_filepaths(self):
r"""Retrieves the list of (filepath_id, filepath)"""
with qdb.sql_connection.TRN:
return [(x['fp_id'], x['fp'])
for x in qdb.util.retrieve_filepaths(
self._filepath_table, self._id_column, self.id,
sort='descending')]
def categories(self):
"""Identifies the metadata columns present in an info file
Returns
-------
cols : list
The category fields
"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_values->>'columns'
FROM qiita.{0}
WHERE sample_id = '{1}'""".format(
self._table_name(self._id), QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql)
results = qdb.sql_connection.TRN.execute_fetchflatten()
if results:
results = sorted(loads(results[0]))
return results
def extend(self, md_template):
"""Adds the given template to the current one
Parameters
----------
md_template : DataFrame
The metadata template contents indexed by sample ids
"""
with qdb.sql_connection.TRN:
md_template = self._clean_validate_template(
md_template, self.study_id, current_columns=self.categories())
new_samples, new_columns = self._common_extend_steps(md_template)
if new_samples or new_columns:
self.validate(self.columns_restrictions)
self.generate_files(new_samples, new_columns)
def _update(self, md_template):
r"""Update values in the template
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by samples ids
Returns
-------
set of str
The samples that were updated
set of str
The columns that were updated
Raises
------
QiitaDBError
If md_template and db do not have the same sample ids
If md_template and db do not have the same column headers
If self.can_be_updated is not True
QiitaDBWarning
If there are no differences between the contents of the DB and the
passed md_template
"""
with qdb.sql_connection.TRN:
# Retrieving current metadata
current_map = self.to_dataframe()
# simple validations of sample ids and column names
samples_diff = set(md_template.index).difference(current_map.index)
if samples_diff:
raise qdb.exceptions.QiitaDBError(
'The new template differs from what is stored '
'in database by these samples names: %s'
% ', '.join(samples_diff))
if not set(current_map.columns).issuperset(md_template.columns):
columns_diff = set(md_template.columns).difference(
current_map.columns)
raise qdb.exceptions.QiitaDBError(
'Some of the columns in your template are not present in '
'the system. Use "extend" if you want to add more columns '
'to the template. Missing columns: %s'
% ', '.join(columns_diff))
# In order to speed up some computation, let's compare only the
# common columns and rows. current_map.columns and
# current_map.index are supersets of md_template.columns and
            # md_template.index, respectively, so this will not fail
current_map = current_map[
md_template.columns].loc[md_template.index]
# Get the values that we need to change
# diff_map is a DataFrame that hold boolean values. If a cell is
# True, means that the md_template is different from the
# current_map while False means that the cell has the same value
diff_map = current_map != md_template
# ne_stacked holds a MultiIndexed DataFrame in which the first
# level of indexing is the sample_name and the second one is the
# columns. We only have 1 column, which holds if that
# (sample, column) pair has been modified or not (i.e. cell)
ne_stacked = diff_map.stack()
# by using ne_stacked to index itself, we get only the columns
# that did change (see boolean indexing in pandas docs)
changed = ne_stacked[ne_stacked]
if changed.empty:
warnings.warn(
"There are no differences between the data stored in the "
"DB and the new data provided",
qdb.exceptions.QiitaDBWarning)
return None, None
changed.index.names = ['sample_name', 'column']
# the combination of np.where and boolean indexing produces
# a numpy array with only the values that actually changed
# between the current_map and md_template
changed_to = md_template.values[np.where(diff_map)]
# now we are going to take that map and create a new DataFrame
# which is going to have a double level index (sample_id /
            # column_name) with a single column 'to'; this will look something
# like:
# to
# sample_name column
# XX.Sample1 sample_type 6
# XX.Sample2 sample_type 5
# host_subject_id the only one
# XX.Sample3 sample_type 10
# physical_specimen_location new location
to_update = pd.DataFrame({'to': changed_to}, index=changed.index)
# reset_index will expand the multi-index and convert the example
# to:
# sample_name column to
# 0 XX.Sample1 sample_type 6
# 1 XX.Sample2 sample_type 5
# 2 XX.Sample2 host_subject_id the only one
# 3 XX.Sample3 sample_type 10
# 4 XX.Sample3 physical_specimen_location new location
to_update.reset_index(inplace=True)
new_columns = []
samples_updated = []
for sid, df in to_update.groupby('sample_name'):
samples_updated.append(sid)
# getting just columns: column and to, and then using column
# as index will generate this for XX.Sample2:
# to
# column
# sample_type 5
# host_subject_id the only one
df = df[['column', 'to']].set_index('column')
# finally to_dict in XX.Sample2:
# {'to': {'host_subject_id': 'the only one',
# 'sample_type': '5'}}
values = df.to_dict()['to']
new_columns.extend(values.keys())
sql = """UPDATE qiita.{0}
SET sample_values = sample_values || %s
WHERE sample_id = %s""".format(
self._table_name(self._id))
qdb.sql_connection.TRN.add(sql, [dumps(values), sid])
nc = list(set(new_columns).union(set(self.categories())))
table_name = self._table_name(self.id)
values = dumps({"columns": nc})
sql = """UPDATE qiita.{0}
SET sample_values = %s
WHERE sample_id = '{1}'""".format(
table_name, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql, [values])
qdb.sql_connection.TRN.execute()
return set(samples_updated), set(new_columns)
def update(self, md_template):
r"""Update values in the template
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by samples ids
Raises
------
QiitaDBError
If md_template and db do not have the same sample ids
If md_template and db do not have the same column headers
If self.can_be_updated is not True
QiitaDBWarning
If there are no differences between the contents of the DB and the
passed md_template
"""
with qdb.sql_connection.TRN:
# Clean and validate the metadata template given
new_map = self._clean_validate_template(
md_template, self.study_id, current_columns=self.categories())
samples, columns = self._update(new_map)
self.validate(self.columns_restrictions)
self.generate_files(samples, columns)
def extend_and_update(self, md_template):
"""Performs the update and extend operations at once
Parameters
----------
md_template : DataFrame
The metadata template contents indexed by sample ids
See Also
--------
update
extend
"""
with qdb.sql_connection.TRN:
md_template = self._clean_validate_template(
md_template, self.study_id, current_columns=self.categories())
new_samples, new_columns = self._common_extend_steps(md_template)
samples, columns = self._update(md_template)
if samples is None:
samples = new_samples
elif new_samples is not None:
samples.update(new_samples)
if columns is None:
columns = new_columns
elif new_columns is not None:
columns.update(new_columns)
self.validate(self.columns_restrictions)
self.generate_files(samples, columns)
def update_category(self, category, samples_and_values):
"""Update an existing column
Parameters
----------
category : str
The category to update
samples_and_values : dict
A mapping of {sample_id: value}
Raises
------
QiitaDBUnknownIDError
If a sample_id is included in values that is not in the template
QiitaDBColumnError
If the column does not exist in the table. This is implicit, and
can be thrown by the contained Samples.
"""
with qdb.sql_connection.TRN:
if not set(self.keys()).issuperset(samples_and_values):
missing = set(self.keys()) - set(samples_and_values)
table_name = self._table_name(self._id)
raise qdb.exceptions.QiitaDBUnknownIDError(missing, table_name)
for k, v in samples_and_values.items():
sample = self[k]
if isinstance(v, np.generic):
v = np.asscalar(v)
sample.setitem(category, v)
qdb.sql_connection.TRN.execute()
def get_category(self, category):
"""Returns the values of all samples for the given category
Parameters
----------
category : str
Metadata category to get information for
Returns
-------
dict
Sample metadata for the category in the form {sample_id: value}
Raises
------
QiitaDBColumnError
If category is not part of the template
"""
with qdb.sql_connection.TRN:
if category not in self.categories():
raise qdb.exceptions.QiitaDBColumnError(category)
sql = """SELECT sample_id,
COALESCE(sample_values->>'{0}', 'None') AS {0}
FROM qiita.{1}
WHERE sample_id != '{2}'""".format(
category, self._table_name(self._id), QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql)
return dict(qdb.sql_connection.TRN.execute_fetchindex())
def check_restrictions(self, restrictions):
"""Checks if the template fulfills the restrictions
Parameters
----------
restrictions : list of Restriction
The restrictions to test if the template fulfills
Returns
-------
set of str
The missing columns
"""
cols = {col for restriction in restrictions
for col in restriction.columns}
return cols.difference(self.categories())
def _get_accession_numbers(self, column):
"""Return the accession numbers stored in `column`
Parameters
----------
column : str
The column name where the accession number is stored
Returns
-------
dict of {str: str}
The accession numbers keyed by sample id
"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_id, {0}
FROM qiita.{1}
WHERE {2}=%s""".format(column, self._table,
self._id_column)
qdb.sql_connection.TRN.add(sql, [self.id])
dbresult = qdb.sql_connection.TRN.execute_fetchindex()
result = {sample_id: accession
for sample_id, accession in dbresult}
return result
def _update_accession_numbers(self, column, values):
"""Update accession numbers stored in `column` with the ones in `values`
Parameters
----------
column : str
The column name where the accession number are stored
values : dict of {str: str}
The accession numbers keyed by sample id
Raises
------
QiitaDBError
If a sample in `values` already has an accession number
QiitaDBWarning
            If `values` is not updating any accession number
"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_id, {0}
FROM qiita.{1}
WHERE {2}=%s
AND {0} IS NOT NULL""".format(column, self._table,
self._id_column)
qdb.sql_connection.TRN.add(sql, [self.id])
dbresult = qdb.sql_connection.TRN.execute_fetchindex()
db_vals = {sample_id: accession
for sample_id, accession in dbresult}
common_samples = set(db_vals) & set(values)
diff = [sample for sample in common_samples
if db_vals[sample] != values[sample]]
if diff:
raise qdb.exceptions.QiitaDBError(
"The following samples already have an accession number: "
"%s" % ', '.join(diff))
            # Remove the common samples from the values dictionary
values = deepcopy(values)
for sample in common_samples:
del values[sample]
if values:
sql_vals = ', '.join(["(%s, %s)"] * len(values))
sql = """UPDATE qiita.{0} AS t
SET {1}=c.{1}
FROM (VALUES {2}) AS c(sample_id, {1})
WHERE c.sample_id = t.sample_id
AND t.{3} = %s
""".format(self._table, column, sql_vals,
self._id_column)
sql_vals = list(chain.from_iterable(values.items()))
sql_vals.append(self.id)
qdb.sql_connection.TRN.add(sql, sql_vals)
qdb.sql_connection.TRN.execute()
else:
warnings.warn("No new accession numbers to update",
qdb.exceptions.QiitaDBWarning)
def validate(self, restriction_dict):
""" Validate the values in the restricted fields in info files
Parameters
----------
restriction_dict : dict of {str: Restriction}
A dictionary with the restrictions that apply to the metadata
Raises
------
QiitaDBWarning
If the values aren't castable
"""
warning_msg = []
columns = self.categories()
wrong_msg = 'Sample "%s", column "%s", wrong value "%s"'
for label, restriction in restriction_dict.items():
missing = set(restriction.columns).difference(columns)
if missing:
warning_msg.append(
"%s: %s" % (restriction.error_msg,
', '.join(sorted(missing))))
else:
valid_null = qdb.metadata_template.constants.EBI_NULL_VALUES
for column, datatype in restriction.columns.items():
# sorting by key (sample id) so we always check in the
# same order, helpful for testing
cats_by_column = self.get_category(column)
for sample in sorted(cats_by_column):
val = cats_by_column[sample]
# ignore if valid null value
if val in valid_null:
continue
# test values
if datatype == datetime:
val = str(val)
formats = [
# 4 digits year
'%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
'%Y-%m-%d %H', '%Y-%m-%d', '%Y-%m', '%Y'
]
date = None
for fmt in formats:
try:
date = datetime.strptime(val, fmt)
break
except ValueError:
pass
if date is None:
warning_msg.append(
wrong_msg % (sample, column, val))
else:
try:
datatype(val)
except (ValueError, TypeError):
warning_msg.append(
wrong_msg % (sample, column, val))
if warning_msg:
warnings.warn(
"Some functionality will be disabled due to missing "
"columns:\n\t%s.\nSee the Templates tutorial for a description"
" of these fields." % ";\n\t".join(warning_msg),
qdb.exceptions.QiitaDBWarning)
@classmethod
def _identify_forbidden_words_in_column_names(cls, column_names):
"""Return a list of forbidden words found in column_names.
Parameters
----------
column_names : iterable
Iterable containing the column names to check.
Returns
------
set of forbidden words present in the column_names iterable.
"""
return set(cls._forbidden_words) & set(column_names)
@classmethod
def _identify_pgsql_reserved_words_in_column_names(cls, column_names):
"""Return a list of PostgreSQL-reserved words found in column_names.
Parameters
----------
column_names : iterable
Iterable containing the column names to check.
Returns
------
set of reserved words present in the column_names iterable.
References
----------
.. [1] postgresql SQL-SYNTAX-IDENTIFIERS: https://goo.gl/EF0cUV.
"""
return (qdb.metadata_template.util.get_pgsql_reserved_words() &
set(column_names))
@classmethod
def _identify_column_names_with_invalid_characters(cls, column_names):
"""Return a list of invalid words found in column_names.
Parameters
----------
column_names : iterable
Iterable containing the column names to check.
Returns
------
set of words containing invalid (illegal) characters.
"""
valid_initial_char = ascii_letters
valid_rest = set(ascii_letters+digits+'_:|')
invalid = []
for s in column_names:
if s[0] not in valid_initial_char:
invalid.append(s)
elif set(s) - valid_rest:
invalid.append(s)
return set(invalid)
@classmethod
def _identify_qiime2_reserved_words_in_column_names(cls, column_names):
"""Return a list of QIIME2-reserved words found in column_names.
Parameters
----------
column_names : iterable
Iterable containing the column names to check.
Returns
------
set of words containing QIIME2-reserved words.
"""
return (qdb.metadata_template.util.get_qiime2_reserved_words() &
set(column_names))
@property
def restrictions(cls):
r"""Retrieves the restrictions based on the class._table
Returns
-------
dict
{restriction: values, ...}
"""
with qdb.sql_connection.TRN:
sql = """SELECT name, valid_values
FROM qiita.restrictions
WHERE table_name = %s"""
qdb.sql_connection.TRN.add(sql, [cls._table])
return dict(qdb.sql_connection.TRN.execute_fetchindex())
def validate_restrictions(self):
r"""Validates the restrictions
Returns
-------
success, boolean
If the validation was successful
message, string
Message if success is not True
"""
with qdb.sql_connection.TRN:
# [:-1] removing last _
name = '%s %d' % (self._table_prefix[:-1], self.id)
success = True
message = []
restrictions = self.restrictions
categories = self.categories()
difference = sorted(set(restrictions.keys()) - set(categories))
if difference:
success = False
message.append(
'%s is missing columns "%s"' % (name, ', '.join(
difference)))
to_review = set(restrictions.keys()) & set(categories)
for key in to_review:
info_vals = set(self.get_category(key).values())
msg = []
for v in info_vals:
if v not in restrictions[key]:
msg.append(v)
if msg:
success = False
message.append(
'%s has invalid values: "%s", valid values are: '
'"%s"' % (name, ', '.join(msg),
', '.join(restrictions[key])))
return success, '\n'.join(message)
| bsd-3-clause |
Shaswat27/scipy | scipy/spatial/tests/test__plotutils.py | 55 | 1567 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
voronoi_plot_2d(obj, show_vertices=False)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| bsd-3-clause |
soulmachine/scikit-learn | sklearn/cluster/setup.py | 31 | 1248 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
RPGOne/Skynet | xgboost-master/tests/python/test_training_continuation.py | 1 | 4153 | import xgboost as xgb
import testing as tm
import numpy as np
import unittest
rng = np.random.RandomState(1337)
class TestTrainingContinuation(unittest.TestCase):
num_parallel_tree = 3
xgb_params_01 = {
'silent': 1,
'nthread': 1,
}
xgb_params_02 = {
'silent': 1,
'nthread': 1,
'num_parallel_tree': num_parallel_tree
}
xgb_params_03 = {
'silent': 1,
'nthread': 1,
'num_class': 5,
'num_parallel_tree': num_parallel_tree
}
def test_training_continuation(self):
tm._skip_if_no_sklearn()
from sklearn.datasets import load_digits
from sklearn.metrics import mean_squared_error
digits_2class = load_digits(2)
digits_5class = load_digits(5)
X_2class = digits_2class['data']
y_2class = digits_2class['target']
X_5class = digits_5class['data']
y_5class = digits_5class['target']
dtrain_2class = xgb.DMatrix(X_2class, label=y_2class)
dtrain_5class = xgb.DMatrix(X_5class, label=y_5class)
gbdt_01 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10)
ntrees_01 = len(gbdt_01.get_dump())
assert ntrees_01 == 10
gbdt_02 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=0)
gbdt_02.save_model('xgb_tc.model')
gbdt_02a = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10, xgb_model=gbdt_02)
gbdt_02b = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10, xgb_model="xgb_tc.model")
ntrees_02a = len(gbdt_02a.get_dump())
ntrees_02b = len(gbdt_02b.get_dump())
assert ntrees_02a == 10
assert ntrees_02b == 10
res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class))
res2 = mean_squared_error(y_2class, gbdt_02a.predict(dtrain_2class))
assert res1 == res2
res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class))
res2 = mean_squared_error(y_2class, gbdt_02b.predict(dtrain_2class))
assert res1 == res2
gbdt_03 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=3)
gbdt_03.save_model('xgb_tc.model')
gbdt_03a = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=7, xgb_model=gbdt_03)
gbdt_03b = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=7, xgb_model="xgb_tc.model")
ntrees_03a = len(gbdt_03a.get_dump())
ntrees_03b = len(gbdt_03b.get_dump())
assert ntrees_03a == 10
assert ntrees_03b == 10
res1 = mean_squared_error(y_2class, gbdt_03a.predict(dtrain_2class))
res2 = mean_squared_error(y_2class, gbdt_03b.predict(dtrain_2class))
assert res1 == res2
gbdt_04 = xgb.train(self.xgb_params_02, dtrain_2class, num_boost_round=3)
assert gbdt_04.best_ntree_limit == (gbdt_04.best_iteration + 1) * self.num_parallel_tree
res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class))
res2 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class, ntree_limit=gbdt_04.best_ntree_limit))
assert res1 == res2
gbdt_04 = xgb.train(self.xgb_params_02, dtrain_2class, num_boost_round=7, xgb_model=gbdt_04)
assert gbdt_04.best_ntree_limit == (gbdt_04.best_iteration + 1) * self.num_parallel_tree
res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class))
res2 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class, ntree_limit=gbdt_04.best_ntree_limit))
assert res1 == res2
gbdt_05 = xgb.train(self.xgb_params_03, dtrain_5class, num_boost_round=7)
assert gbdt_05.best_ntree_limit == (gbdt_05.best_iteration + 1) * self.num_parallel_tree
gbdt_05 = xgb.train(self.xgb_params_03, dtrain_5class, num_boost_round=3, xgb_model=gbdt_05)
assert gbdt_05.best_ntree_limit == (gbdt_05.best_iteration + 1) * self.num_parallel_tree
res1 = gbdt_05.predict(dtrain_5class)
res2 = gbdt_05.predict(dtrain_5class, ntree_limit=gbdt_05.best_ntree_limit)
np.testing.assert_almost_equal(res1, res2)
| bsd-3-clause |
amolkahat/pandas | asv_bench/benchmarks/algorithms.py | 3 | 3226 | import warnings
from importlib import import_module
import numpy as np
import pandas as pd
from pandas.util import testing as tm
for imp in ['pandas.util', 'pandas.tools.hashing']:
try:
hashing = import_module(imp)
break
except (ImportError, TypeError, ValueError):
pass
class Factorize(object):
params = [True, False]
param_names = ['sort']
def setup(self, sort):
N = 10**5
self.int_idx = pd.Int64Index(np.arange(N).repeat(5))
self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5))
self.string_idx = tm.makeStringIndex(N)
def time_factorize_int(self, sort):
self.int_idx.factorize(sort=sort)
def time_factorize_float(self, sort):
self.float_idx.factorize(sort=sort)
def time_factorize_string(self, sort):
self.string_idx.factorize(sort=sort)
class Duplicated(object):
params = ['first', 'last', False]
param_names = ['keep']
def setup(self, keep):
N = 10**5
self.int_idx = pd.Int64Index(np.arange(N).repeat(5))
self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5))
self.string_idx = tm.makeStringIndex(N)
def time_duplicated_int(self, keep):
self.int_idx.duplicated(keep=keep)
def time_duplicated_float(self, keep):
self.float_idx.duplicated(keep=keep)
def time_duplicated_string(self, keep):
self.string_idx.duplicated(keep=keep)
class DuplicatedUniqueIndex(object):
def setup(self):
N = 10**5
self.idx_int_dup = pd.Int64Index(np.arange(N * 5))
# cache is_unique
self.idx_int_dup.is_unique
def time_duplicated_unique_int(self):
self.idx_int_dup.duplicated()
class Match(object):
def setup(self):
self.uniques = tm.makeStringIndex(1000).values
self.all = self.uniques.repeat(10)
def time_match_string(self):
with warnings.catch_warnings(record=True):
pd.match(self.all, self.uniques)
class Hashing(object):
def setup_cache(self):
N = 10**5
df = pd.DataFrame(
{'strings': pd.Series(tm.makeStringIndex(10000).take(
np.random.randint(0, 10000, size=N))),
'floats': np.random.randn(N),
'ints': np.arange(N),
'dates': pd.date_range('20110101', freq='s', periods=N),
'timedeltas': pd.timedelta_range('1 day', freq='s', periods=N)})
df['categories'] = df['strings'].astype('category')
df.iloc[10:20] = np.nan
return df
def time_frame(self, df):
hashing.hash_pandas_object(df)
def time_series_int(self, df):
hashing.hash_pandas_object(df['ints'])
def time_series_string(self, df):
hashing.hash_pandas_object(df['strings'])
def time_series_float(self, df):
hashing.hash_pandas_object(df['floats'])
def time_series_categorical(self, df):
hashing.hash_pandas_object(df['categories'])
def time_series_timedeltas(self, df):
hashing.hash_pandas_object(df['timedeltas'])
def time_series_dates(self, df):
hashing.hash_pandas_object(df['dates'])
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/manifold/tests/test_isomap.py | 121 | 4301 | from itertools import product
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal)
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
def test_isomap_clone_bug():
# regression test for bug reported in #6062
model = manifold.Isomap()
for n_neighbors in [10, 15, 20]:
model.set_params(n_neighbors=n_neighbors)
model.fit(np.random.rand(50, 2))
assert_equal(model.nbrs_.n_neighbors,
n_neighbors)
| bsd-3-clause |
jougs/nest-simulator | pynest/examples/intrinsic_currents_subthreshold.py | 12 | 8348 | # -*- coding: utf-8 -*-
#
# intrinsic_currents_subthreshold.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Intrinsic currents subthreshold
------------------------------------
This example illustrates how to record from a model with multiple
intrinsic currents and visualize the results. This is illustrated
using the ``ht_neuron`` which has four intrinsic currents: ``I_NaP``,
``I_KNa``, ``I_T``, and ``I_h``. It is a slightly simplified implementation of
the neuron model proposed in [1]_.
The neuron is driven by DC current, which is alternated
between depolarizing and hyperpolarizing. Hyperpolarization
intervals become increasingly longer.
References
~~~~~~~~~~~
.. [1] Hill and Tononi (2005) Modeling Sleep and Wakefulness in the
       Thalamocortical System. J Neurophysiol 93:1671.
http://dx.doi.org/10.1152/jn.00915.2004.
See Also
~~~~~~~~~~
:doc:`intrinsic_currents_spiking`
"""
###############################################################################
# We import all necessary modules for simulation, analysis and plotting.
import nest
import matplotlib.pyplot as plt
###############################################################################
# Additionally, we set the verbosity using ``set_verbosity`` to suppress info
# messages. We also reset the kernel to be sure to start with a clean NEST.
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# We define simulation parameters:
#
# - The length of depolarization intervals
# - The length of hyperpolarization intervals
# - The amplitude for de- and hyperpolarizing currents
# - The end of the time window to plot
n_blocks = 5
t_block = 20.
t_dep = [t_block] * n_blocks
t_hyp = [t_block * 2 ** n for n in range(n_blocks)]
I_dep = 10.
I_hyp = -5.
t_end = 500.
###############################################################################
# We create the one neuron instance and the DC current generator and store
# the returned handles.
nrn = nest.Create('ht_neuron')
dc = nest.Create('dc_generator')
###############################################################################
# We create a multimeter to record
#
# - membrane potential ``V_m``
# - threshold value ``theta``
# - intrinsic currents ``I_NaP``, ``I_KNa``, ``I_T``, ``I_h``
#
# by passing these names in the ``record_from`` list.
#
# To find out which quantities can be recorded from a given neuron,
# run::
#
# nest.GetDefaults('ht_neuron')['recordables']
#
# The result will contain an entry like::
#
# <SLILiteral: V_m>
#
# for each recordable quantity. You need to pass the value of the
# ``SLILiteral``, in this case ``V_m`` in the ``record_from`` list.
#
# We want to record values with 0.1 ms resolution, so we set the
# recording interval as well; the default recording resolution is 1 ms.
# create multimeter and configure it to record all information
# we want at 0.1 ms resolution
mm = nest.Create('multimeter',
params={'interval': 0.1,
'record_from': ['V_m', 'theta',
'I_NaP', 'I_KNa', 'I_T', 'I_h']}
)
###############################################################################
# We connect the DC generator and the multimeter to the neuron. Note that
# the multimeter, just like the voltmeter is connected to the neuron,
# not the neuron to the multimeter.
nest.Connect(dc, nrn)
nest.Connect(mm, nrn)
###############################################################################
# We are ready to simulate. We alternate between driving the neuron with
# depolarizing and hyperpolarizing currents. Before each simulation
# interval, we set the amplitude of the DC generator to the correct value.
for t_sim_dep, t_sim_hyp in zip(t_dep, t_hyp):
dc.amplitude = I_dep
nest.Simulate(t_sim_dep)
dc.amplitude = I_hyp
nest.Simulate(t_sim_hyp)
###############################################################################
# We now fetch the data recorded by the multimeter. The data are returned as
# a dictionary with entry ``times`` containing timestamps for all recorded
# data, plus one entry per recorded quantity.
#
# All data is contained in the ``events`` entry of the multimeter's status
# dictionary, which we read here directly via ``mm.events``.
data = mm.events
t = data['times']
###############################################################################
# The next step is to plot the results. We create a new figure, add a single
# subplot and plot at first membrane potential and threshold.
fig = plt.figure()
Vax = fig.add_subplot(111)
Vax.plot(t, data['V_m'], 'b-', lw=2, label=r'$V_m$')
Vax.plot(t, data['theta'], 'g-', lw=2, label=r'$\Theta$')
Vax.set_ylim(-80., 0.)
Vax.set_ylabel('Voltage [mV]')
Vax.set_xlabel('Time [ms]')
###############################################################################
# To plot the input current, we need to create an input current trace. We
# construct it from the durations of the de- and hyperpolarizing inputs and
# add the delay in the connection between DC generator and neuron:
#
# * We find the delay by checking the status of the dc->nrn connection.
# * We find the resolution of the simulation from the kernel status.
# * Each current interval begins one time step after the previous interval,
# is delayed by the delay and effective for the given duration.
# * We build the time axis incrementally. We only add the delay when adding
# the first time point after t=0. All subsequent points are then
# automatically shifted by the delay.
conns = nest.GetConnections(dc, nrn)
delay = conns.delay
dt = nest.GetKernelStatus('resolution')
t_dc, I_dc = [0], [0]
for td, th in zip(t_dep, t_hyp):
t_prev = t_dc[-1]
t_start_dep = t_prev + dt if t_prev > 0 else t_prev + dt + delay
t_end_dep = t_start_dep + td
t_start_hyp = t_end_dep + dt
t_end_hyp = t_start_hyp + th
t_dc.extend([t_start_dep, t_end_dep, t_start_hyp, t_end_hyp])
I_dc.extend([I_dep, I_dep, I_hyp, I_hyp])
###############################################################################
# The following function turns a name such as ``I_NaP`` into proper TeX code
# :math:`I_{\mathrm{NaP}}` for a pretty label.
def texify_name(name):
return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_'))
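# For example (added note), texify_name('I_NaP') produces the TeX string
# '$I_{\mathrm{NaP}}$'.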
###############################################################################
# Next, we add a right vertical axis and plot the currents with respect to
# that axis.
Iax = Vax.twinx()
Iax.plot(t_dc, I_dc, 'k-', lw=2, label=texify_name('I_DC'))
for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'),
('I_NaP', 'crimson'), ('I_KNa', 'aqua')):
Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname))
Iax.set_xlim(0, t_end)
Iax.set_ylim(-10., 15.)
Iax.set_ylabel('Current [pA]')
Iax.set_title('ht_neuron driven by DC current')
###############################################################################
# We need to make a little extra effort to combine lines from the two axis
# into one legend.
lines_V, labels_V = Vax.get_legend_handles_labels()
lines_I, labels_I = Iax.get_legend_handles_labels()
try:
Iax.legend(lines_V + lines_I, labels_V + labels_I, fontsize='small')
except TypeError:
# work-around for older Matplotlib versions
Iax.legend(lines_V + lines_I, labels_V + labels_I)
###############################################################################
# Note that ``I_KNa`` is not activated in this example because the neuron does
# not spike. ``I_T`` has only a very small amplitude.
| gpl-2.0 |
toobaz/pandas | pandas/tests/frame/test_block_internals.py | 2 | 22034 | from datetime import datetime, timedelta
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timestamp,
compat,
date_range,
option_context,
)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.core.internals import ObjectBlock
from pandas.core.internals.blocks import IntBlock
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal,
assert_frame_equal,
assert_series_equal,
)
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals:
def test_setitem_invalidates_datetime_index_freq(self):
# GH#24096 altering a datetime64tz column inplace invalidates the
# `freq` attribute on the underlying DatetimeIndex
dti = date_range("20130101", periods=3, tz="US/Eastern")
ts = dti[1]
df = DataFrame({"B": dti})
assert df["B"]._values.freq == "D"
df.iloc[1, 0] = pd.NaT
assert df["B"]._values.freq is None
# check that the DatetimeIndex was not altered in place
assert dti.freq == "D"
assert dti[1] == ts
def test_cast_internals(self, float_frame):
casted = DataFrame(float_frame._data, dtype=int)
expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(float_frame._data, dtype=np.int32)
expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame["E"] = 7.0
consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame["F"] = 8.0
assert len(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord("A"), ord("Z")):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame["E"] = 7.0
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).all()
# unconsolidated
float_frame["E"] = 7.0
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self, float_frame):
float_frame["E"] = 7.0
expected = float_frame.values.copy()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame["foo"] = "bar"
values = float_frame[["A", "B", "C", "D"]].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[["A", "B", "C", "D"]].values
assert values.dtype == np.float64
values = mixed_float_frame[["A", "B", "C"]].values
assert values.dtype == np.float32
values = mixed_float_frame[["C"]].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[["A", "B", "C", "D"]].values
assert values.dtype == np.float64
values = mixed_int_frame[["A", "D"]].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[["A", "B", "C"]].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[["B", "C"]].values
assert values.dtype == np.uint64
values = mixed_int_frame[["A", "C"]].values
assert values.dtype == np.int32
values = mixed_int_frame[["C", "D"]].values
assert values.dtype == np.int64
values = mixed_int_frame[["A"]].values
assert values.dtype == np.int32
values = mixed_int_frame[["C"]].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({"A": [2 ** 63 - 1]})
result = df["A"]
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [2 ** 63]})
result = df["A"]
expected = Series(np.asarray([2 ** 63], np.uint64), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [datetime(2005, 1, 1), True]})
result = df["A"]
expected = Series(
np.asarray([datetime(2005, 1, 1), True], np.object_), name="A"
)
assert_series_equal(result, expected)
df = DataFrame({"A": [None, 1]})
result = df["A"]
expected = Series(np.asarray([np.nan, 1], np.float_), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [1.0, 2]})
result = df["A"]
expected = Series(np.asarray([1.0, 2], np.float_), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [1.0 + 2.0j, 3]})
result = df["A"]
expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [1.0 + 2.0j, 3.0]})
result = df["A"]
expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [1.0 + 2.0j, True]})
result = df["A"]
expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [1.0, None]})
result = df["A"]
expected = Series(np.asarray([1.0, np.nan], np.float_), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [1.0 + 2.0j, None]})
result = df["A"]
expected = Series(np.asarray([1.0 + 2.0j, np.nan], np.complex_), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [2.0, 1, True, None]})
result = df["A"]
expected = Series(np.asarray([2.0, 1, True, None], np.object_), name="A")
assert_series_equal(result, expected)
df = DataFrame({"A": [2.0, 1, datetime(2006, 1, 1), None]})
result = df["A"]
expected = Series(
np.asarray([2.0, 1, datetime(2006, 1, 1), None], np.object_), name="A"
)
assert_series_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [
[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)],
]
df = DataFrame(data)
# check dtypes
result = df.dtypes
expected = Series({"datetime64[ns]": 3})
# mixed-type frames
float_string_frame["datetime"] = datetime.now()
float_string_frame["timedelta"] = timedelta(days=1, seconds=1)
assert float_string_frame["datetime"].dtype == "M8[ns]"
assert float_string_frame["timedelta"].dtype == "m8[ns]"
result = float_string_frame.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [
np.dtype("object"),
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
],
index=list("ABCD") + ["foo", "datetime", "timedelta"],
)
assert_series_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype="timedelta64[s]")
df = DataFrame(index=range(3))
df["A"] = arr
expected = DataFrame(
{"A": pd.timedelta_range("00:00:01", periods=3, freq="s")}, index=range(3)
)
assert_frame_equal(df, expected)
expected = DataFrame(
{
"dt1": Timestamp("20130101"),
"dt2": date_range("20130101", periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
},
index=range(3),
)
df = DataFrame(index=range(3))
df["dt1"] = np.datetime64("2013-01-01")
df["dt2"] = np.array(
["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]"
)
# df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9))
return DataFrame(data=data, columns=["A", "B", "C"], dtype=dtype)
msg = "compound dtypes are not implemented in the DataFrame constructor"
with pytest.raises(NotImplementedError, match=msg):
f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])
# these work (though results may be unexpected)
f("int64")
f("float64")
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f("M8[ns]")
def test_equals_different_blocks(self):
# GH 9330
df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2], "C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert df0._data.blocks[0].dtype != df1._data.blocks[0].dtype
# do the real tests
assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
def test_no_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
def test_copy(self, float_frame, float_string_frame):
cop = float_frame.copy()
cop["E"] = cop["A"]
assert "E" not in float_frame
# copy objects
copy = float_string_frame.copy()
assert copy._data is not float_string_frame._data
def test_pickle(self, float_string_frame, timezone_frame):
empty_frame = DataFrame()
unpickled = tm.round_trip_pickle(float_string_frame)
assert_frame_equal(float_string_frame, unpickled)
# buglet
float_string_frame._data.ndim
# empty
unpickled = tm.round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
unpickled = tm.round_trip_pickle(timezone_frame)
assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = pd.read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize("US/Eastern")
ser_starting = ser_starting.tz_convert("UTC")
ser_starting.index.name = "starting"
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize("US/Eastern")
ser_ending = ser_ending.tz_convert("UTC")
ser_ending.index.name = "ending"
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype("M8[ns]").name
objectname = np.dtype(np.object_).name
df = DataFrame(
{"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[
np.dtype("float64"),
np.dtype("int64"),
np.dtype(objectname),
np.dtype(datetime64name),
],
index=["a", "b", "c", "f"],
)
assert_series_equal(result, expected)
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
"d": np.array([1.0] * 10, dtype="float32"),
"e": np.array([1] * 10, dtype="int32"),
"f": np.array([1] * 10, dtype="int16"),
"g": Timestamp("20010102"),
},
index=np.arange(10),
)
result = df._get_numeric_data()
expected = df.loc[:, ["a", "b", "d", "e", "f"]]
assert_frame_equal(result, expected)
only_obj = df.loc[:, ["c", "g"]]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
assert_frame_equal(result, expected)
df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
assert_frame_equal(result, expected)
def test_get_numeric_data_extension_dtype(self):
# GH 22290
df = DataFrame(
{
"A": integer_array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
"B": Categorical(list("abcabc")),
"C": integer_array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
"D": IntervalArray.from_breaks(range(7)),
}
)
result = df._get_numeric_data()
expected = df.loc[:, ["A", "C"]]
assert_frame_equal(result, expected)
def test_convert_objects(self, float_string_frame):
oops = float_string_frame.T.T
converted = oops._convert(datetime=True)
assert_frame_equal(converted, float_string_frame)
assert converted["A"].dtype == np.float64
# force numeric conversion
float_string_frame["H"] = "1."
float_string_frame["I"] = "1"
# add in some items that will be nan
length = len(float_string_frame)
float_string_frame["J"] = "1."
float_string_frame["K"] = "1"
float_string_frame.loc[0:5, ["J", "K"]] = "garbled"
converted = float_string_frame._convert(datetime=True, numeric=True)
assert converted["H"].dtype == "float64"
assert converted["I"].dtype == "int64"
assert converted["J"].dtype == "float64"
assert converted["K"].dtype == "float64"
assert len(converted["J"].dropna()) == length - 5
assert len(converted["K"].dropna()) == length - 5
# via astype
converted = float_string_frame.copy()
converted["H"] = converted["H"].astype("float64")
converted["I"] = converted["I"].astype("int64")
assert converted["H"].dtype == "float64"
assert converted["I"].dtype == "int64"
# via astype, but errors
converted = float_string_frame.copy()
with pytest.raises(ValueError, match="invalid literal"):
converted["H"].astype("int32")
# mixed in a single column
df = DataFrame(dict(s=Series([1, "na", 3, 4])))
result = df._convert(datetime=True, numeric=True)
expected = DataFrame(dict(s=Series([1, np.nan, 3, 4])))
assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = DataFrame({"a": [1, 2, 3], "b": [4.0, 5, 6], "c": ["x", "y", "z"]})
mixed2 = mixed1._convert(datetime=True)
assert_frame_equal(mixed1, mixed2)
def test_infer_objects(self):
# GH 11221
df = DataFrame(
{
"a": ["a", 1, 2, 3],
"b": ["b", 2.0, 3.0, 4.1],
"c": [
"c",
datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3),
],
"d": [1, 2, 3, "d"],
},
columns=["a", "b", "c", "d"],
)
df = df.iloc[1:].infer_objects()
assert df["a"].dtype == "int64"
assert df["b"].dtype == "float64"
assert df["c"].dtype == "M8[ns]"
assert df["d"].dtype == "object"
expected = DataFrame(
{
"a": [1, 2, 3],
"b": [2.0, 3.0, 4.1],
"c": [datetime(2016, 1, 1), datetime(2016, 1, 2), datetime(2016, 1, 3)],
"d": [2, 3, "d"],
},
columns=["a", "b", "c", "d"],
)
# reconstruct frame to verify inference is same
tm.assert_frame_equal(df.reset_index(drop=True), expected)
def test_stale_cached_series_bug_473(self):
# this is chained, but ok
with option_context("chained_assignment", None):
Y = DataFrame(
np.random.random((4, 4)),
index=("a", "b", "c", "d"),
columns=("e", "f", "g", "h"),
)
repr(Y)
Y["e"] = Y["e"].astype("object")
Y["g"]["c"] = np.NaN
repr(Y)
result = Y.sum() # noqa
exp = Y["g"].sum() # noqa
assert pd.isna(Y["g"]["c"])
def test_get_X_columns(self):
# numeric and object columns
df = DataFrame(
{
"a": [1, 2, 3],
"b": [True, False, True],
"c": ["foo", "bar", "baz"],
"d": [None, None, None],
"e": [3.14, 0.577, 2.773],
}
)
tm.assert_index_equal(df._get_numeric_data().columns, pd.Index(["a", "b", "e"]))
def test_strange_column_corruption_issue(self):
# (wesm) Unclear how exactly this is related to internal matters
df = DataFrame(index=[0, 1])
df[0] = np.nan
wasCol = {}
# uncommenting these makes the results match
# for col in xrange(100, 200):
# wasCol[col] = 1
# df[col] = np.nan
for i, dt in enumerate(df.index):
for col in range(100, 200):
if col not in wasCol:
wasCol[col] = 1
df[col] = np.nan
df[col][dt] = i
myid = 100
first = len(df.loc[pd.isna(df[myid]), [myid]])
second = len(df.loc[pd.isna(df[myid]), [myid]])
assert first == second == 0
def test_constructor_no_pandas_array(self):
# Ensure that PandasArray isn't allowed inside Series
# See https://github.com/pandas-dev/pandas/issues/23995 for more.
arr = pd.Series([1, 2, 3]).array
result = pd.DataFrame({"A": arr})
expected = pd.DataFrame({"A": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
assert isinstance(result._data.blocks[0], IntBlock)
def test_add_column_with_pandas_array(self):
# GH 26390
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]})
df["c"] = pd.array([1, 2, None, 3])
df2 = pd.DataFrame(
{
"a": [1, 2, 3, 4],
"b": ["a", "b", "c", "d"],
"c": pd.array([1, 2, None, 3]),
}
)
assert type(df["c"]._data.blocks[0]) == ObjectBlock
assert type(df2["c"]._data.blocks[0]) == ObjectBlock
assert_frame_equal(df, df2)
| bsd-3-clause |
airanmehr/bio | Scripts/Miscellaneous/Ali/my_global_functions.py | 1 | 2494 | import os
import pandas as pd
import numpy as np
import tempfile
def my_mkdir(dir):
"""Check for the existence of the directory "dir", otherwise make it recursively.
    :param dir: path of the directory to create (with parents) if it does not exist.
    :return: None
"""
if not os.path.exists(dir):
print "Making %s" % dir
os.system("mkdir -p %s" % dir)
def get_gene_coordinates(g):
""" Return the coordinates of gene "g" in hg19/CRCH37
:param g: Gene Name
:return: [Gene Name, Chromosome, Start Position, End Position]
"""
g = g.upper()
coordinate_file="/media/alek/DATA/DB/gene_info"
df=pd.read_csv(coordinate_file, sep='\t', header=None).set_index(0)
if g in df.index:
T=df.loc[g].values
if len(T.shape)==1:
T=T[None,:]
if len(np.unique(T[:,1]))>1:
print "\"%s\" exists in more than 1 chromosome!"%(g)
else:
return g, T[0, 1], np.min(T[:, 2]), np.max(T[:, 3])
else:
print "\"%s\" is not presented in the %s!"%(g, coordinate_file)
def get_ref_chr(Chr):
ref_file="/home/alek/DATABASE/human_ancestor_GRCh37_e59/human_ancestor_%s.fa"%Chr
f= open(ref_file,'r')
l=f.readline()
Ref=f.read().replace("\n","")
f.close()
return Ref.upper()
def get_vcf_header(in_file):
temp_file = tempfile.NamedTemporaryFile()
cmd = "zgrep -w '^#CHROM' -m1 %s > %s"%(in_file, temp_file.name)
os.system(cmd)
header = pd.read_csv(temp_file.name, sep = '\t', header=None).values
temp_file.close()
return list(header)
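# Added note (illustrative): get_rand(i) below returns a random multiple of
# 10**-i strictly between 0 and 1, e.g. get_rand(2) yields one of
# 0.01, 0.02, ..., 0.99.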
def get_rand(i):
i = int(i)
return (np.round((10**i-2)*np.random.rand())+1)/10**i
from sklearn import mixture
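# Added note (illustrative): Bicluster_GMM rescales the scores by their maximum,
# keeps values above median - 2*std, fits a two-component Gaussian mixture to that
# subset, and labels every sample by the higher-mean ("carrier") component,
# returning the hard labels plus the carrier / non-carrier posterior probabilities.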
def Bicluster_GMM(scores):
# prep
scores = np.array(scores)
X = np.reshape(np.array(scores), (-1, 1))
X=X/max(X)
threshold=np.median(X)-2*(np.std(X))
I=np.nonzero(X>threshold)[0]
# print len(I)
# print threshold*max(scores)
# fit model
g = mixture.GMM(n_components=2)
g.fit(X[I])
# print g.means_*max(scores)
# predict labels
labels = g.predict(X)
high_gaussian_ind = np.argmax(g.means_[:, 0])
low_gaussian_ind = np.argmin(g.means_[:, 0])
pred_labels = np.array(labels == high_gaussian_ind, dtype=float)
# class posteriors
probs = g.predict_proba(X)
p_carr = probs[:, high_gaussian_ind]
p_ncarr = probs[:, low_gaussian_ind]
return pred_labels, p_carr, p_ncarr
def smoothing(x, wsize = 5):
s = pd.Series(x)
r = s.rolling(window=wsize)
    return r.mean().values
| mit |
sahat/bokeh | bokeh/mplexporter/tools.py | 75 | 1732 | """
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
"""Initialize the IPython notebook display elements
This function borrows heavily from the excellent vincent package:
http://github.com/wrobstory/vincent
"""
try:
from IPython.core.display import display, HTML
except ImportError:
        print('IPython Notebook could not be loaded.')
        return
require_js = '''
if (window['d3'] === undefined) {{
require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
require(["d3"], function(d3) {{
window.d3 = d3;
{0}
}});
}};
if (window['topojson'] === undefined) {{
require.config(
{{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
);
require(["topojson"], function(topojson) {{
window.topojson = topojson;
}});
}};
'''
d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
"d3.layout.cloud.js")
topojson_js_url = "http://d3js.org/topojson.v1.min.js"
vega_js_url = 'http://trifacta.github.com/vega/vega.js'
dep_libs = '''$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$([IPython.events]).trigger("vega_loaded.vincent");
})
})
})
});''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
topojson_js_url, vega_js_url)
load_js = require_js.format(dep_libs)
html = '<script>'+load_js+'</script>'
display(HTML(html))
| bsd-3-clause |
mikhailklassen/flash-tools | flash_generic/toolbox.py | 1 | 11243 | try:
from yt.mods import *
except:
import sys
sys.exit("yt not found or not in PYTHONPATH")
import sys
import numpy as np
import matplotlib.pyplot as plt
# List of constants in CGS units
pc = 3.08568025e18 # Parsec in cm
AU = 1.49598e13 # AU in cm
Msun = 1.98892e33 # Solar mass in g
Rsun = 6.955e10 # Radius of the Sun in cm
Lsun = 3.839e33        # Solar luminosity in erg s^-1
secyr = 31556926.0 # Seconds in a year
G = 6.6725985e-8 # Gravitational constant in cm^3 g^-1 s^-2
sb = 5.6705119e-5 # Stefan-Boltzmann constant in erg cm^-2 s^-1 K^-4
hconst = 6.6260755400000E-27 # Planck's constant in erg Hz^-1
c = 2.9979245800000E+10 # Speed of light in cm s^-1
kb = 1.3806581200000E-16 # Boltzmann's constant in erg K^-1
electron_charge = 4.8032068150000E-10 # Charge of the electron in esu
electron_mass = 9.1093897540000E-28 # Mass of the electron in g
proton_mass = 1.6726231100000E-24 # Mass of the proton in g
fine_structure = 7.2973530764000E-03 # Fine structure constant
avogadro = 6.0221367360000E+23 # Avogadro's constant
gas_constant = 8.3145119843000E+07    # Ideal gas constant in erg mol^-1 K^-1
wien = 2.8977562400000E-01 # Wien's constant in cm K
def select_scale(length):
'''
Chooses an "optimal" scale for plotting purposes, i.e. should the distance axis
be in units of cm, solar radii, AU, pc, kpc, Mpc, etc.
'''
scale = 'cm'
if length/Rsun > 0.001:
scale = 'Rsun'
if length/Rsun > 1000.0:
scale = 'AU'
if length/AU > 10000.0:
scale = 'pc'
if length/pc > 1000.0:
scale = 'kpc'
if length/pc > 1.e6:
scale = 'Mpc'
return scale
def get_times(files):
'''
Returns an array of times in the base units of the simulation from an input
array of plot files or checkpoint files.
'''
ts = TimeSeriesData.from_filenames(files)
times = np.zeros(len(ts))
for i, pf in enumerate(ts):
times[i] = pf.current_time
return times
def index_containing_substring(the_list, substring):
for i, s in enumerate(the_list):
if substring in s:
return i
return -1
def find_rigid_rotation_omega(parameters):
is_rotating = False
keys = parameters.keys()
    omega_idx = index_containing_substring(keys, 'omega')
    if omega_idx != -1:
        print 'Found rotation parameter {0}.'.format(keys[omega_idx])
        omega = parameters[keys[omega_idx]]
        is_rotating = True
    else:
        omega = 0.0
return omega, is_rotating
def find_rotation_beta(M,R,omega):
# Calculate the ratio of rotational energy to gravitational binding
# energy, crudely approximating the system as a uniform density
# sphere.
I = 2.0/5.0 * M * R**2
Krot = 0.5 * I * omega**2
U = 3.0/5.0 * G * M**2 / R
frac = Krot/U
return frac
def radial_profile(parameters,chkfiles):
xmin, xmax = float(parameters['xmin']), float(parameters['xmax'])
ymin, ymax = float(parameters['ymin']), float(parameters['ymax'])
zmin, zmax = float(parameters['zmin']), float(parameters['zmax'])
xsize = xmax-xmin
xradius = xsize/2.0
ysize = ymax-ymin
yradius = ysize/2.0
zsize = zmax-zmin
zradius = zsize/2.0
try:
mu_mol = float(parameters['mu_mol'])
except:
mu_mol = 2.14
pf = load(chkfiles[0])
sphere = pf.h.sphere(pf.domain_center, (1., "pc"))
rad_profile = BinnedProfile1D(sphere, 300, "Radiuspc", 0.0, 1., log_space=False)
rad_profile.add_fields("Density")
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.axvline(x=1.595e17/pc,color='#aaaaaa')
ax1.semilogy(rad_profile["Radiuspc"], rad_profile["Density"])
ax1.set_ylim(min(rad_profile["Density"]),1.1*max(rad_profile["Density"]))
ax1.grid()
ax1.set_title('Density Profile')
ax1.set_xlabel('Radius (pc)')
ax1.set_ylabel(r'Mass Density (g cm$^{-3}$)')
ax2 = ax1.twinx()
ax2.set_yscale('log')
y1, y2 = ax1.get_ylim()
ax2.set_ylim(y1/mu_mol/proton_mass,y2/mu_mol/proton_mass)
ax2.set_ylabel(r'Number Density (cm$^{-3}$)')
fig.savefig(parameters['outpath']+'/density_profile.png')
# Same plot, but log-log axes
plt.clf()
ax1 = fig.add_subplot(111)
ax1.axvline(x=1.595e17/pc,color='#aaaaaa')
ax1.loglog(rad_profile["Radiuspc"], rad_profile["Density"])
ax1.grid()
ax1.set_title('Density Profile')
ax1.set_xlabel('Radius (pc)')
ax1.set_ylabel(r'Mass Density (g cm$^{-3}$)')
ax2 = ax1.twinx()
ax2.set_yscale('log')
y1, y2 = ax1.get_ylim()
ax2.set_ylim(y1/mu_mol/proton_mass,y2/mu_mol/proton_mass)
ax2.set_ylabel(r'Number Density (cm$^{-3}$)')
fig.savefig(parameters['outpath']+'/density_profile_log.png')
rho_c = max(rad_profile["Density"])
return xsize, xradius, rho_c
#def cloud_mass(parameters):
# '''
# Takes a dict "parameters" and determines the cloud mass.
# '''
# return
#def mean_density(parameters):
# return rho_mean, n_mean
#def freefall_time(parameters):
# return
#
#def rms_Mach_number(parameters):
# return
#
#def sound_cross_time(parameters):
# return
#
#
#def turbulent_cross_time(parameters):
# return
#
#
#def Jeans_length(parameters):
# return
#
#
#def Jeans_volume(parameters):
# return
#
#
#def Jeans_mass(parameters):
# return
#
#
#def simulation_box_size(parameters):
# return
#
#
#def smallest_cell_size(parameters):
# return
#
#
#def max_gas_density(parameters):
# return
#
#
#def max_number_density(parameters):
# return
#
#
#def sink_r_accr(parameters):
# return
#
#
#def number_sinks(pltfiles):
# return
#
def ZAMS_radius(mass):
'''
Based on Tout et al. 1996
Calculate the ZAMS radius of a star for a given mass (expressed in solar units).
Returns the radius in solar units. Accurate for stars of solar metallicity.
'''
theta = 1.71535900
iota = 6.59778800
kappa = 10.08855000
llambda = 1.01249500
mu = 0.07490166
nu = 0.01077422
xi = 3.08223400
omicron = 17.84778000
ppi = 0.00022582
mm = mass # solar masses
Rms = (theta*mm**(2.5) + iota*mm**(6.5) + kappa*mm**(11) + llambda*mm**(19) + mu*mm**(19.5))
Rms = Rms / (nu + xi*mm**(2) + omicron*mm**(8.5) + mm**(18.5) + ppi*mm**(19.5))
return Rms
def ZAMS_luminosity(mass):
'''
Based on Tout et al. 1996
Calculate the ZAMS luminosity of a star for a given mass (expressed in solar units).
Returns the luminosity in solar units. Accurate for stars of solar metallicity.
'''
alpha = 0.39704170
beta = 8.52762600
gamm = 0.00025546
delta = 5.43288900
epsil = 5.56357900
zeta = 0.78866060
eta = 0.00586685
mm = mass # solar masses
Lms = (alpha*mm**(5.5)+beta*mm**(11))
Lms = Lms / (gamm + mm**(3) + delta*mm**(5) + epsil*mm**(7) + zeta*mm**(8) + eta*mm**(9.5))
return Lms
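# Example usage of the two ZAMS fits above (added sketch; the variable names are
# illustrative only). The effective temperature follows from L = 4*pi*R^2*sigma*T^4
# using the module constants Rsun, Lsun and sb defined at the top of this file:
#
#     Rms = ZAMS_radius(10.0)        # radius of a 10 Msun ZAMS star, in Rsun
#     Lms = ZAMS_luminosity(10.0)    # luminosity of a 10 Msun ZAMS star, in Lsun
#     Teff = (Lms*Lsun / (4.0*np.pi*(Rms*Rsun)**2 * sb))**0.25   # Kelvin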
def planck_phot(fr,T):
'''
Planck function, with the integration over solid angle already taken,
divided by the photon energy. When integrated, we get the total number
of photons.
Returns the number of photons emitted at a given frequency, for a
blackbody at a given temperature.
'''
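    # Added derivation note: starting from the Planck specific intensity
    # B_nu = 2*h*nu**3/c**2 / (exp(h*nu/(k*T)) - 1), integrating over the
    # outgoing hemisphere gives pi*B_nu, and dividing by the photon energy
    # h*nu leaves 2*pi*nu**2/c**2 / (exp(h*nu/(k*T)) - 1), which is the
    # expression evaluated below.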
tpic2 = 2*pi/(c**2)
rfr = hconst/kb/T
nphot = tpic2 * fr**2 / ( exp(fr*rfr) - 1.0 )
return nphot
def plot_profiles(parameters):
xmin, xmax = float(parameters['xmin']), float(parameters['xmax'])
ymin, ymax = float(parameters['ymin']), float(parameters['ymax'])
zmin, zmax = float(parameters['zmin']), float(parameters['zmax'])
xsize = xmax-xmin
xradius = xsize/2.0
ysize = ymax-ymin
yradius = ysize/2.0
zsize = zmax-zmin
zradius = zsize/2.0
try:
mu_mol = float(parameters['mu_mol'])
except:
mu_mol = 2.14
try:
profile = int(parameters['density_profile'])
except:
sys.exit('density_profile parameter not defined. Check the flash.par parameter file.')
if profile == 1: # Power-law profile
print 'Power-law profile'
dens_inner_radius = float(parameters['dens_inner_radius'])
dens_outer_radius = float(parameters['dens_outer_radius'])
M_total = float(parameters['M_total'])
dens_power_law = float(parameters['dens_power_law'])
density_contrast = float(parameters['density_contrast'])
fita = dens_power_law * (3.0 - dens_power_law) * M_total / \
(8.0*np.pi) * dens_outer_radius**(dens_power_law-3.0) / \
dens_inner_radius**(dens_power_law+2.0)
rho_c = (3.0 - dens_power_law)*M_total / (4.0*np.pi)* \
dens_outer_radius**(dens_power_law - 3.0) / \
dens_inner_radius**(dens_power_law) * \
(1.0 + dens_power_law / 2.0)
rho0 = (3.0 - dens_power_law)*M_total/(4.0*np.pi*dens_outer_radius**3.0)
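        # Summary of the piecewise profile assembled below (added for clarity):
        #   rho(r) = rho_c - fita * r**2                                  for r <= dens_inner_radius
        #   rho(r) = (3 - p) * M_total / (4*pi*R_out**(3 - p) * r**p)     for dens_inner_radius < r <= dens_outer_radius
        #   rho(r) = density_contrast * rho0                              otherwise
        # with p = dens_power_law and R_out = dens_outer_radius.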
radius = xradius
radius = np.linspace(0,radius,300)
dens = np.zeros(len(radius))
for i in range(len(dens)):
if (radius[i] <= dens_inner_radius):
dens[i] = -fita*radius[i]**2.0 + rho_c
elif (radius[i] <= dens_outer_radius) and (radius[i] > dens_inner_radius):
dens[i] = (3.0 - dens_power_law)*M_total / \
(4.0*np.pi*dens_outer_radius**(3.0-dens_power_law)* \
radius[i]**dens_power_law)
else:
dens[i] = density_contrast * rho0
# Plot the density profile
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.axvline(x=2.440868032664758e-4,color='#aaaaaa')
ax1.axvline(x=dens_inner_radius/pc,color='#aaaaaa')
ax1.axvline(x=dens_outer_radius/pc,color='#aaaaaa')
ax1.semilogy(radius/pc,dens)
ax1.set_xlim(radius[0]/pc,1.1*radius[-1]/pc)
ax1.set_ylim(min(dens),1.1*max(dens))
ax1.grid()
ax1.set_title('Density Profile')
ax1.set_xlabel('Radius (pc)')
ax1.set_ylabel(r'Mass Density (g cm$^{-3}$)')
ax2 = ax1.twinx()
ax2.set_yscale('log')
y1, y2 = ax1.get_ylim()
ax2.set_ylim(y1/mu_mol/proton_mass,y2/mu_mol/proton_mass)
ax2.set_ylabel(r'Number Density (cm$^{-3}$)')
plt.savefig(parameters['outpath']+'/density_profile.png')
# Same plot, but log-log axes
plt.clf()
ax1 = fig.add_subplot(111)
ax1.axvline(x=dens_inner_radius/pc,color='#aaaaaa')
ax1.axvline(x=dens_outer_radius/pc,color='#aaaaaa')
ax1.loglog(radius/pc,dens)
ax1.grid()
ax1.set_xlim(radius[0]/pc,1.1*radius[-1]/pc)
ax1.set_ylim(min(dens),1.1*max(dens))
ax1.set_title('Density Profile')
ax1.set_xlabel('Radius (pc)')
ax1.set_ylabel(r'Mass Density (g cm$^{-3}$)')
ax2 = ax1.twinx()
ax2.set_yscale('log')
y1, y2 = ax1.get_ylim()
ax2.set_ylim(y1/mu_mol/proton_mass,y2/mu_mol/proton_mass)
ax2.set_ylabel(r'Number Density (cm$^{-3}$)')
plt.savefig(parameters['outpath']+'/density_profile_log.png')
| bsd-3-clause |