repo_name | path | copies | size | content | license
---|---|---|---|---|---|
gillenbrown/betterplotlib
|
betterplotlib/_generate_interface.py
|
1
|
3681
|
# auto generates _interface.py
import os
this_dir = os.path.realpath(os.path.split(__file__)[0])
interface_loc = this_dir + os.sep + "_interface.py"
axes_loc = this_dir + os.sep + "axes_bpl.py"
figure_loc = this_dir + os.sep + "figure_bpl.py"
interface = open(interface_loc, "w")
# write a header
interface.write("from matplotlib import docstring\n" +
"from matplotlib.pyplot import _autogen_docstring\n" +
"import matplotlib.pyplot as plt\n\n"
"import betterplotlib as bpl\n\n")
def get_functions(loc, definition):
func_args = []
with open(loc, "r") as original:
in_axes = False
in_def = False
for line in original:
if in_axes == False and line.strip() == definition:
in_axes = True
continue
if in_axes and line.strip().startswith("def "):
in_def = True
this_def = ""
if in_def:
if "def" in line:
this_def += line.strip().replace("self, ", "") + "\n"
else:
this_def += line[4:]
if not line.endswith("\n"):
this_def += "\n"
if in_def and ":" in line:
in_def = False
func_args.append(this_def)
return func_args
def strip_defaults(function_def):
# gets rid of all the default parameters in a function argument so that it
# can be turned from the definition into a function call
# first get where the arguments start
first_paren_idx = function_def.find("(")
# get the function name and first parenthesis
def_begin = function_def[4:first_paren_idx + 1]
# then get the arguments. The -3 at the end takes care of the newline,
# colon, and closing parenthesis.
args = function_def[first_paren_idx + 1:-3]
# then we can examine each one in turn, formatting it properly
args_list = []
for arg in args.split(","):
# find where the equals sign indicating a default parameter is
idx_equals = arg.find("=")
# if there isn't one, there is no default, so we can just keep the
# whole thing
if idx_equals == -1:
args_list.append(arg)
# if there is a default parameter, get rid of it.
else:
args_list.append(arg[0:idx_equals])
# then put them back into a comma separated list
args_joined = ",".join(args_list)
# to have things line up properly in the file, we need to add some spaces
args_joined = args_joined.replace("\n", "\n ")
# then join everything together
return def_begin + args_joined + ")"
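# Illustrative sketch only (not called by the generator): a hypothetical
# definition string showing what strip_defaults is expected to produce.
def _example_strip_defaults():
    sample = "def scatter(x, y, s=20, c=None):\n"
    # The defaults are dropped, leaving just the parameter names, so the
    # captured definition can be reused as the forwarded call:
    # strip_defaults(sample) -> "scatter(x, y, s, c)"
    return strip_defaults(sample)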
axes_definition = "class Axes_bpl(Axes):"
# figure_definition = "class Figure_bpl(Figure):"
axes_functions_args = get_functions(axes_loc, axes_definition)
# figure_functions = get_functions(figure_loc, figure_definition)
for func_args in axes_functions_args:
func_name = func_args.split()[1].split("(")[0]
func_args_no_defaults = strip_defaults(func_args)
interface.write("@_autogen_docstring(bpl.Axes_bpl.{})\n".format(func_name) + \
func_args + \
" ax = plt.gca(projection='bpl')\n" + \
" return ax.{}\n\n".format(func_args_no_defauts))
# for func in figure_functions:
# interface.write("@_autogen_docstring(bpl.Figure_bpl.{})\n".format(func) + \
# "def {}(*args, **kwargs):\n".format(func) + \
# " fig = plt.gcf()\n" + \
# " return fig.{}(*args, **kwargs)\n\n".format(func))
interface.close()
|
mit
|
adamgreenhall/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
83
|
17276
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
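# For reference, a minimal sketch of the SAMME.R transformation that
# `_samme_proba` is expected to apply (illustrative only, not used by the
# tests; the real implementation also clips vanishing probabilities):
def _samme_proba_reference(proba, n_classes):
    # (K - 1) * (log p_k - mean_j log p_j), computed row-wise
    log_proba = np.log(proba)
    return (n_classes - 1) * (log_proba -
                              log_proba.mean(axis=1)[:, np.newaxis])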
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check picklability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
|
bsd-3-clause
|
RJTK/dwglasso_cweeds
|
src/models/dwglasso.py
|
1
|
12057
|
'''
Implements DWGLASSO and associated helper functions. We will load
in the dT data from /data/interim/interim_data.hdf and then apply the
dwglasso algorithm on a subsequence of this data.
NOTE: This file is intended to be executed by make from the top
level of the project directory hierarchy. We rely on os.getcwd()
and it will not work if run directly as a script from this directory.
'''
import time
import sys
import numba # JIT compilation
import numpy as np
import warnings
from scipy.linalg import cho_factor, cho_solve, lu_factor, lu_solve, eigh
import matplotlib as mpl; mpl.use('TkAgg')
from matplotlib import pyplot as plt
from scipy.optimize import differential_evolution
from src.conf import MAX_P, ZZT_FILE_PREFIX, YZT_FILE_PREFIX,\
X_VALIDATE_FILE_PREFIX
def build_YZ(X, p):
'''
Builds the Y (output) and Z (input) matrices for the model from
the X matrix, which contains the temperature series in its rows.
We need to also provide the lag length of the model, p.
X: n x T matrix of data. Each row is a time series
X = [x(0), x(1), ..., x(T - 1)]
p: Model lag length
Returns (Y, Z):
Y: n x (T - p) matrix [x(p), x(p + 1), ..., x(T - 1)]
Z: np x (T - p) matrix [z(p - 1), z(p), ..., z(T - 2)]
where z(t) = [x(t).T x(t - 1).T, ..., x(t - p + 1).T].T stacks x's
Then with an n x np coefficient matrix we obtain:
Y_hat = B_hat * Z
'''
n = X.shape[0]
T = X.shape[1]
if T == 0:
return np.array([]), np.array([])
Y = X[:, p:]
assert Y.shape[0] == n and Y.shape[1] == T - p, 'Issues with shape!'
Z = np.zeros((n * p, T - p))
for tau in range(p):
Z[tau * n: (tau + 1) * n, :] = X[:, tau: tau - p]
return Y, Z
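# Minimal shape check for build_YZ (illustrative only, never called by the
# pipeline): with n = 2 series of length T = 5 and lag p = 2, Y should be
# 2 x 3 and Z should be 4 x 3.
def _example_build_YZ():
    X_demo = np.arange(10).reshape(2, 5)  # hypothetical 2 x 5 data matrix
    Y_demo, Z_demo = build_YZ(X_demo, p=2)
    assert Y_demo.shape == (2, 3) and Z_demo.shape == (4, 3)
    return Y_demo, Z_demo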
def cross_validate(ZZT: np.array, YZT: np.array, X_test,
p: int, mu: float=0.1, tol=1e-6,
max_iter=100, warn_PSD=False, ret_B_err=False,
t_limit_sec=3600):
'''
Search over the parameters lmbda, alpha, delta, and sigma (via
differential evolution), fit the dwglasso model for each candidate, and
cross validate it against the 1-step ahead prediction task on the data
given in Z and Y. Y_hat = B_hat * Z.
'''
t0 = time.time()
def tlimit_func(*args, **kwargs):
if time.time() - t0 >= t_limit_sec:
return True
return
def f(x):
l, a, d, s = x
B_hat = dwglasso(ZZT=ZZT, YZT=YZT, p=p, lmbda=l, alpha=a,
mu=mu, delta=d, sigma=s, tol=tol,
warn_PSD=warn_PSD, ret_B_err=ret_B_err,
silent=True)
Y_hat = np.dot(B_hat, Z)
err = (np.linalg.norm(Y - Y_hat, ord='fro')**2) / T
G = np.abs(sum([B_hat[:, tau * n:(tau + 1) * n]
for tau in range(p)]).T)
G = G - np.diag(np.diag(G))
G = G > 0 # The Granger-causality graph
print('err = {:15.2f}'.format(err),
'(l = %9.4f, a = %6.5f, d = %6.5f, s = %9.4f) ' % tuple(x),
'Num edges: {:9d}'.format(np.sum(G)), end='\r')
return err
Y, Z = build_YZ(X_test, p)
T = Y.shape[1]
n = Y.shape[0]
bounds = [(0, 2), (0, 1), (0, 1), (0, 100)] # l a d s
res = differential_evolution(f, bounds, disp=True, polish=False,
maxiter=100, popsize=25,
callback=tlimit_func, tol=1e-4)
print() # Newline
print('Optimizer Success:', res.success)
l, a, d, s = res.x
print('Optimal parameters: lmbda = %0.5f, alpha = %0.5f, delta = %0.5f,'
' sigma = %0.5f' % (l, a, d, s))
B_hat = dwglasso(ZZT=ZZT, YZT=YZT, p=p, lmbda=l, alpha=a,
mu=mu, delta=d, sigma=s, tol=tol,
warn_PSD=warn_PSD, ret_B_err=ret_B_err)
print()
plt.imshow(B_hat)
plt.colorbar()
plt.title('Optimal B_hat')
plt.show()
return B_hat
# This function is ridiculously complicated
def dwglasso(ZZT: np.array, YZT: np.array, p: int=1, lmbda: float=0.0,
alpha: float=0.05, mu: float=0.1, delta=0, sigma=0,
tol=1e-6, max_iter=100, warn_PSD=True, ret_B_err=False,
silent=False, assert_params=True):
'''Minimizes over B:
1/(2T)||Y - BZ||_F^2 + lmbda[alpha||B||_F^2 + (1 - alpha)G_DW(B)]
via ADMM. G_DW is the depth wise group regularizer
\sum_{ij}||Bt_ij||_2 where Bt_ij is the p-vector (B(1)_ij ... B(p)_ij)
e.g. the filter coefficients from xj -> xi.
if lmbda = 0 we have simple ordinary least squares, and if alpha = 1
then we have tikhonov regularization. mu is a tuning parameter
for ADMM convergence and unless lmbda = 0 or alpha = 1, we need mu > 0.
'''
if assert_params:
assert alpha >= 0 and alpha <= 1, 'Required: alpha \in [0, 1]'
assert lmbda >= 0, 'Required: lmbda >= 0'
assert mu >= 0, 'Required: mu >= 0' # 0 only if lmbda = 0 or alpha = 1
assert sigma >= 0, 'Required: sigma >= 0'
assert delta >= 0 and delta <= 1, 'Required: delta \in [0, 1]'
# Proximity operators
# @numba.jit(nopython=True, cache=True)
def proxf_lu(V: np.array):
'''
proximity operator of ||Y - BZ||_F^2 + lmbda*alpha*||B||_F^2,
implemented using an LU factorized covariance matrix. This will
work even if the covariance matrix is (due to numerical issues)
not positive semi definite.
'''
return (lu_solve(lu_piv, YZT.T + V.T / mu,
overwrite_b=True, check_finite=False)).T
def proxf_cho(V: np.array):
'''
proximity operator of ||Y - BZ||_F^2 + lmbda*alpha*||B||_F^2,
implemented using a cholesky factorized covariance matrix. This
requires the covariance matrix to be (numerically) positive
semidefinite.
'''
return (cho_solve(L_and_lower, YZT.T + V.T / mu,
overwrite_b=True, check_finite=False)).T
@numba.jit(nopython=True, cache=True) # Dramatic speed up is achieved
def proxg(V: np.array):
'''proximity operator of lmbda*(1 - alpha)*sum_ij||Bt_ij||_2. See DWGLASSO
paper for details
'''
n = V.shape[0]
p = V.shape[1] // n
P = np.empty((n, n * p))
for i in range(n):
for j in range(n):
Vtij = V[i, j::n]
Vtij_l2 = 0
for tau in range(p): # Calling np.norm not valid w/ numba
Vtij_l2 += Vtij[tau]**2
if Vtij_l2 == 0:
P[i, j::n] = 0
else:
r = lmbda * (1 - alpha) * mu / Vtij_l2
P[i, j::n] = max(0, 1 - r) * Vtij
return P
def admm():
def rel_err(Bxk, Bzk): # F-norm difference between Bx and Bz per entry
return (1 / (n * n * p)) * np.linalg.norm(Bxk - Bzk, 'f')**2
# Init with 0s
Bz, Bu = np.zeros((n, n * p)), np.zeros((n, n * p))
Bx = proxf(Bz)
k = 0
rel_err_k = rel_err(Bx, Bz)
while rel_err_k > tol and k < max_iter: # ADMM iterations
k += 1
if not silent:
print('iter:', k, '(1/pn^2)||Bx - Bz||_F^2 =',
rel_err_k, end='\r')
sys.stdout.flush()
Bx = proxf(Bz - Bu)
Bz = proxg(Bx + Bu)
Bu = Bu + Bx - Bz
rel_err_k = rel_err(Bx, Bz)
if not silent:
print() # Print out a newline
if k >= max_iter: # Should only ever reach k == max_iter
if not silent:
warnings.warn('Max iterations exceeded! rel_err = %e'
% rel_err_k, RuntimeWarning)
return Bz
# ----------------REAL FUNCTION ENTRY POINT-----------------------
n = ZZT.shape[0] // p
# Regularize the covariance estimate
ZZT = (1 - delta) * ZZT + delta * np.diag(np.diag(ZZT)) +\
sigma * np.eye(n * p)
B_err = np.ones((n * p, n * p))
if warn_PSD and not silent: # Check if ZZT > 0
try:
cho_factor(ZZT)
except np.linalg.LinAlgError as e:
lmbda_min = eigh(ZZT, eigvals_only=True, turbo=False,
check_finite=False, eigvals=(0, 0))
warnings.warn('ZZT is indefinite! lmbda_min(ZZT) = %e, err: %s'
% (lmbda_min, e.args))
if ret_B_err:
return B_err
if lmbda == 0: # OLS
if not silent:
print('OLS')
try:
L_and_lower = cho_factor(ZZT.T, overwrite_a=True)
B = cho_solve(L_and_lower, YZT.T, overwrite_b=True,
check_finite=False).T
return B
except np.linalg.LinAlgError as e: # ZZT is (probably) indefinite
lu_piv = lu_factor(ZZT.T, overwrite_a=True)
B = lu_solve(lu_piv, YZT.T, overwrite_b=True,
check_finite=False).T
return B
else: # Regularized solution
if alpha == 1: # Tikhonov regularization with lmbda
if not silent:
print('L2 Regularization')
# R and subsequently YZT are overwritten in the lu solver
R = (ZZT + lmbda * np.eye(n * p)) # Has nothing to do with Rx
try:
L_and_lower = cho_factor(R.T, overwrite_a=True)
B = cho_solve(L_and_lower, YZT.T, overwrite_b=True,
check_finite=False).T
return B
except np.linalg.LinAlgError as e: # ZZT is (probably) indefinite
lu_piv = lu_factor(R.T, overwrite_a=True)
B = lu_solve(lu_piv, YZT.T, overwrite_b=True,
check_finite=False).T
return B
else: # DWGLASSO
if not silent:
print('DWGLASSO')
assert mu > 0, 'Required: mu > 0 (unless lmbda = 0, or alpha = 1)'
if alpha == 0:
if not silent:
warnings.warn('We need alpha > 0 to guarantee convergence,'
' to the optimal B matrix', RuntimeWarning)
r = (1 + 2 * mu * lmbda * alpha) / mu
R = (ZZT + r * np.eye(n * p)) # Has nothing to do with Rx
try:
L_and_lower = cho_factor(R.T, overwrite_a=True)
proxf = proxf_cho
return admm()
except np.linalg.LinAlgError as e: # ZZT is (probably) indefinite
lu_piv = lu_factor(R.T, overwrite_a=True)
proxf = proxf_lu
return admm()
def main():
p = 2
assert p <= MAX_P and p >= 1, 'p must be in [1, MAX_P]!'
ZZT = np.load(ZZT_FILE_PREFIX + str(p) + '_T' + '.npy')
YZT = np.load(YZT_FILE_PREFIX + str(p) + '_T' + '.npy')
XT = np.load(X_VALIDATE_FILE_PREFIX + '_T' + '.npy')
Y_test, Z_test = build_YZ(XT, p)
B_hat = dwglasso(ZZT, YZT, p, lmbda=370.0, alpha=0.2, tol=1e-11,
mu=0.1, max_iter=150, sigma=2.5, delta=0.1,
ret_B_err=False)
print('Non 0 entries:', np.sum(np.abs(B_hat) > 0),
'/', B_hat.size)
plt.imshow(B_hat)
plt.colorbar()
plt.title('DWGLASSO Test Run on T')
plt.show()
# Verify, by rolling across the station axis, that we have everything
# correctly lined up. We expect the lowest error on the non-rolled
# Z_test matrix. Then another dip after it's been rolled all the way back
T = Y_test.shape[1]
n = Y_test.shape[0]
errs = []
for i in range(n + 10):
print('roll:', i, end='\r')
Y_hat = np.dot(B_hat, np.roll(Z_test, i, axis=0))
err_i = (np.linalg.norm(Y_hat - Y_test, ord='fro') ** 2) / T
errs.append(err_i)
plt.plot(range(n + 10), errs)
plt.title('Verification of alignment')
plt.xlabel('roll index')
plt.ylabel('Error')
plt.show()
return
if __name__ == '__main__':
main()
|
mit
|
morganics/bayesianpy
|
tests/data_reader.py
|
1
|
1982
|
import logging
import unittest.mock as mock
import unittest
import bayesianpy.network
import pandas as pd
import bayesianpy.utils.list
import tests.iris
import dask.dataframe as dd
import bayesianpy.reader
from timeit import default_timer as timer
class TestDaskDataReader(unittest.TestCase):
def test_pre_loading(self):
npartitions = 10
all_ddf = dd.concat([dd.from_pandas(tests.iris.create_iris_dataset(), npartitions=npartitions) for i in range(0, 500)], interleave_partitions=True)
#print(all_ddf)
total_length = len(all_ddf)
length_of_partition = len(all_ddf.get_partition(0).compute())
bayesianpy.jni.attach()
dfr = bayesianpy.reader.PandasDataReaderCommand(all_ddf, preload=False)
reader = dfr.executeReader()
start = timer()
for i in range(0, total_length):
reader.getCallable('read')()
end = timer()
non_preloaded = end-start
dfr = bayesianpy.reader.PandasDataReaderCommand(all_ddf, preload=True)
reader = dfr.executeReader()
start = timer()
for i in range(0, total_length):
reader.getCallable('read')()
end = timer()
preloaded = end-start
print(non_preloaded)
print(preloaded)
self.assertTrue(preloaded < non_preloaded)
#self.assertEqual(0, reader.getCallable("get_partition_index")())
self.assertEqual(1, reader.getCallable("get_loaded_partition_index")())
for i in range(0, total_length - (length_of_partition+10)):
reader.getCallable('read')()
self.assertEqual(npartitions, reader.getCallable("get_partition_index")())
self.assertEqual(npartitions, reader.getCallable("get_loaded_partition_index")())
#
# current_partition = reader.getCallable('get_partition')()
# print(current_partition)
# row_value = reader.getCallable('getString')(0)
# print(row_value)
|
apache-2.0
|
EconomicSL/housing-model
|
src/main/resources/calibration/code/SaleReprice.py
|
2
|
7180
|
# -*- coding: utf-8 -*-
"""
Defines several classes to study the re-pricing (price decrease) behaviour of households trying to sell their
houses. It uses Zoopla data.
@author: daniel, Adrian Carro
"""
import Datasets as ds
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import scipy.stats as stats
import math
class DiscountDistribution:
"""Class to collect and store the distribution of price discounts per month"""
# Number of months on the market to consider as sample (bins in the x axis for building the pdf)
x_size = 48 # 4 years
# For every month in the sample, countNoChange stores the number of properties not experiencing a drop in price
# between -90% and -0.2%
countNoChange = np.zeros(x_size)
# For every month in the sample, countTotal stores the number of properties in the data during that month
countTotal = np.zeros(x_size)
# For every month in the sample, changesByMonth stores a list of the logarithmic percent changes (in absolute value)
# in price of every property experiencing a drop in price between -90% and -0.2%
changesByMonth = [[] for i in range(x_size)]
# Need to implement an __init__ method
def __init__(self):
pass
# Record one listing with no change of price between start and end months
def record_no_change(self, start, end):
if end >= self.x_size:
end = self.x_size - 1
for month in range(start, end + 1):
self.countNoChange[month] += 1
self.countTotal[month] += 1
# Record one listing with a drop in price between -90% and -0.2% at month
def record_change(self, start, month, percent):
if -90 < percent < -0.2:
self.record_no_change(start, month - 1) # Record the listing as no change before month
if month < self.x_size: # Only record the change if month is within the sample
self.countTotal[month] += 1
self.changesByMonth[month].append(math.log(math.fabs(percent)))
else:
self.record_no_change(start, month)
# Probability that price will not change in a given month (given that the property is still on the market)
def probability_no_change(self):
return np.divide(self.countNoChange, self.countTotal)
# Probability that there will be no change per month, integrated over all months
def probability_no_change_all_time(self):
return self.probability_no_change().sum() / self.x_size
# Get a list of all changes, i.e., changesByMonth in a single list instead of a list of lists
def list_all_changes(self):
return [x for month in self.changesByMonth for x in month]
class PropertyRecord:
"""Class to function as record of the most recent price, initial price and days on market for a given property"""
current_price = 0
initial_market_price = 0
days_on_market = 0
last_change_date = 0
def __init__(self, initial_date, initial_price):
self.current_price = initial_price
self.initial_market_price = initial_price
self.days_on_market = 0
self.last_change_date = datetime.strptime(initial_date, "%Y-%m-%d")
def update_price(self, date_string, price):
new_date = datetime.strptime(date_string, "%Y-%m-%d")
previous_days_on_market = self.days_on_market
new_days_on_market = self.days_on_market + (new_date - self.last_change_date).days
reduction = (price - self.current_price) * 100.0 / self.current_price
# Previous equation: Discounts were computed as price difference between current and previous price over
# initial price
# reduction = (price - self.current_price) * 100.0 / self.initial_market_price
self.current_price = price
self.days_on_market = new_days_on_market
self.last_change_date = new_date
return previous_days_on_market, new_days_on_market, reduction
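# Illustrative sketch (not used by the calibration below): how PropertyRecord
# accumulates days on market and reports percentage reductions. The dates and
# prices are made up.
def _example_property_record():
    record = PropertyRecord("2010-01-01", 200000)
    # One month later the price drops by 5%: update_price returns the days on
    # market before and after the update, and the percentage change relative
    # to the current price.
    prev_days, new_days, reduction = record.update_price("2010-01-31", 190000)
    return prev_days, new_days, reduction  # (0, 30, -5.0)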
def plot_probability(mat):
"""Plot a matrix mat as a colour plot, used for plotting a pdf"""
plt.figure(figsize=(10, 10))
im = plt.imshow(mat, origin='lower', cmap=plt.get_cmap("jet"))
plt.colorbar(im, orientation='horizontal')
plt.show()
def calculate_price_changes(filtered_zoopla_data):
"""Compute and return the discount distribution"""
distribution = DiscountDistribution()
dict_of_property_records = {}
for index, row in filtered_zoopla_data.iterrows():
# If the listing already has an entry in dict_of_property_records...
if row["LISTING ID"] in dict_of_property_records:
# ...recover its PropertyRecord object as last_record
last_record = dict_of_property_records[row["LISTING ID"]]
# ...store the PropertyRecord's previous current_price as old_price
old_price = last_record.current_price
# ...update the PropertyRecord with the most recent information (day and price)
prev_days_on_market, new_days_on_market, reduction = last_record.update_price(row["DAY"], row["PRICE"])
# If price has not changed, then record the no change to the DiscountDistribution
if old_price == row["PRICE"]:
distribution.record_no_change(prev_days_on_market / 30, new_days_on_market / 30)
# Otherwise, record the change to the DiscountDistribution
else:
distribution.record_change(prev_days_on_market / 30, new_days_on_market / 30, reduction)
# Otherwise, add a new PropertyRecord for the listing to dict_of_property_records
else:
dict_of_property_records[row["LISTING ID"]] = PropertyRecord(row["DAY"], row["PRICE"])
return distribution
# Read and filter data from Zoopla
data = ds.ZooplaMatchedDaily()
chunk = data.read(200000)
filtered_chunk = chunk[(chunk["MARKET"] == "SALE") & (chunk["PRICE"] > 0)][["LISTING ID", "DAY", "PRICE"]]
# Compute probability distribution of price discounts
dist = calculate_price_changes(filtered_chunk)
# Plot probability of no change per month on market
print "Average probability of no change per month"
print dist.probability_no_change().sum() / dist.probability_no_change().size
print "Probability of no change per month"
print dist.probability_no_change()
plt.figure()
plt.plot(dist.probability_no_change())
plt.xlabel("Months on market")
plt.ylabel("Probability of no price change")
# Plot average price discount per month on market
mean, sd = stats.norm.fit(dist.list_all_changes())
monthlyMeans = [stats.norm.fit(dist.changesByMonth[i])[0] for i in range(dist.x_size)]
print "Best mean and standard deviation of percentage change per month given change"
print mean, sd
print "Monthly Means"
print monthlyMeans
plt.figure()
plt.plot(monthlyMeans)
plt.xlabel("Months on market")
plt.ylabel("Percent discount")
# Plot probability distribution of price discounts (independent of month on market)
curve = [stats.norm.pdf(i * 0.05, mean, sd) for i in range(-35, 100)]
plt.figure()
plt.hist(dist.list_all_changes(), bins=50, density=True, label="Data")
plt.plot([i * 0.05 for i in range(-35, 100)], curve, label="Normal fit")
plt.xlabel("Percent discount")
plt.ylabel("Probability")
plt.legend()
plt.show()
|
mit
|
kikimaroca/beamtools
|
beamtools/dev/test data/pulse_interp/spectrumplot.py
|
1
|
2720
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 8 17:47:28 2018
@author: cpkmanchee
"""
import numpy as np
import matplotlib.pyplot as plt
import beamtools as bt
show_plt = True
save_plt = False
dpi=600
wlim = [1020,1040]
tlim = [-10,20]
file = '20180103-004-000ac001.txt'
ac = bt.import_data_file(file, 'bt_ac')
ac.intensity = ac.power
ac.power = bt.normalize(ac.power)
ac_fit,_ = bt.pulse.fit_ac([ac.delay,ac.power], bgform='linear')
file = '20180103-004-000.csv'
spec = bt.import_data_file(file, 'oo_spec')
spec.intensity = bt.normalize(spec.intensity)
flimit, _ = bt.pulse.spectrumFT([spec.wavelength,spec.intensity])
AiFT = bt.pulse.autocorr(np.abs(flimit.et)**2)
pfit, _ = bt.pulse.fit_ac([flimit.time,AiFT])
fig = plt.figure(figsize=(4,3.25), facecolor='w')
#ax2 = ax1.twinx()
#ax2.scatter(spec.pump, spec.energy, s=0)
#ax2.set_ylabel('Pulse energy (uJ)')
#ax2.tick_params('y', colors='k')
#ax1.text(20,65, '{:.0f}% slope \nefficiency'.format(slope*100))
#scale = np.around(spec.energy.values[-1]/spec.out.values[-1], decimals=2)
#ax1.set_ylim([0,np.around(spec.intensity.max(),decimals=-1)])
#ax2.set_ylim(np.asarray(ax1.get_ylim())*scale)
f2p = 'sech2'
ax2 = fig.add_subplot(111)
for fit in pfit:
if fit.ftype.lower() in bt.alias_dict[f2p]:
ax2.plot(flimit.time*1E12,bt.normalize(AiFT), '-',
label='TL pulse', c='xkcd:ocean blue')
label = 'TL - {} fit\n{:.2f} ps'.format(fit.ftype, bt.pulse.sigma_fwhm(fit.popt[0]*1E12,fit.ftype))
ax2.plot(flimit.time*1E12, bt.normalize(fit.subs(flimit.time)), '--',
label=label, c='xkcd:deep purple')
for fit in ac_fit:
if fit.ftype.lower() in bt.alias_dict[f2p]:
ax2.plot(ac.delay-fit.popt[2],bt.normalize(bt.rmbg([ac.delay,ac.power],fit=fit)), '-',
label='Autocorrelation', c='xkcd:chartreuse')
label = 'AC - {} fit\n{:.2f} ps'.format(fit.ftype, bt.pulse.sigma_fwhm(fit.popt[0],fit.ftype))
ax2.plot(ac.delay-fit.popt[2], bt.normalize(bt.rmbg([ac.delay,fit.subs(ac.delay)],fit=fit)), '--',
label=label, c='xkcd:dark green')
ax2.set_xlim(tlim)
ax2.set_xlabel('Delay (ps)')
ax2.set_ylabel('Intensity (arb.)')
ax2.legend(loc=[0.55,0.1], fontsize='small')
fig.tight_layout()
ax1 = plt.axes([.66, .64, .25, .25], facecolor='y')
ax1.plot(spec.wavelength, spec.intensity, '-', c='xkcd:ocean blue')
#ax1.set_xlabel('Wavelength (nm)')
# Make the y-axis label, ticks and tick labels match the line color.
#ax1.set_ylabel('Intensity')
ax1.set_xticks([1020,1030,1040])
ax1.tick_params('both', labelsize='x-small')
ax1.set_xlim(wlim)
if show_plt:
plt.show()
if save_plt:
fig.savefig('spectrum.png', dpi=dpi, bbox_inches='tight')
|
mit
|
abhishekgahlot/scikit-learn
|
examples/plot_digits_pipe.py
|
250
|
1809
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
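# As a side note (illustrative, not required for this example), the same '__'
# convention works when setting nested parameters directly on the pipeline,
# e.g. pipe.set_params(pca__n_components=40, logistic__C=1.0).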
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
gwulfs/zipline
|
zipline/sources/data_frame_source.py
|
26
|
5253
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import numpy as np
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
Data source that yields from a pandas DataFrame.
:Axis layout:
* columns : sids
* index : datetime
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the items of the DataFrame
assert isinstance(data.columns, pd.Int64Index)
# TODO is ffilling correct/necessary?
# Forward fill prices
self.data = data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.index[0])
self.end = kwargs.get('end', self.data.index[-1])
self.sids = self.data.columns
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(price) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
'price': price,
# Just chose something large
# if no volume available.
'volume': 1e9,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
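# Illustrative usage sketch (not part of the zipline API; sids and dates are
# hypothetical): a DataFrameSource built from a small price frame with integer
# sid columns and a DatetimeIndex, as the assertions above require.
def _example_dataframe_source():
    index = pd.date_range('2015-01-01', periods=3)
    frame = pd.DataFrame({0: [10.0, np.nan, 10.5],
                          1: [20.0, 20.1, 20.2]}, index=index)
    source = DataFrameSource(frame)
    # Each yielded event is a dict with 'dt', 'sid', 'price' and a placeholder
    # 'volume'; nan prices are forward filled in __init__ before iteration.
    return list(source.raw_data_gen())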
class DataPanelSource(DataSource):
"""
Data source that yields from a pandas Panel.
:Axis layout:
* items : sids
* major_axis : datetime
* minor_axis : price, volume, ...
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the items of the Panel
assert isinstance(data.items, pd.Int64Index)
# TODO is ffilling correct/necessary?
# forward fill with volumes of 0
self.data = data.fillna(value={'volume': 0})
self.data = self.data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.major_axis[0])
self.end = kwargs.get('end', self.data.major_axis[-1])
self.sids = self.data.items
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(series['price']) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
|
apache-2.0
|
crichardson17/starburst_atlas
|
Low_resolution_sims/Dusty_LowRes/Geneva_cont_Rot/Geneva_cont_Rot_5/fullgrid/Rest.py
|
30
|
9192
|
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax.add_patch(patch3)
ax.add_patch(patch2)
ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [3,4,15,22,37,53,54,55,57,62,77,88,89,90,92,93]
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Rest of the Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_Rest.pdf')
plt.clf()
print "figure saved"
|
gpl-2.0
|
webmasterraj/GaSiProMo
|
flask/lib/python2.7/site-packages/pandas/tseries/period.py
|
2
|
32600
|
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date, timedelta
import numpy as np
from pandas.core.base import PandasObject
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
from pandas._period import Period
import pandas._period as period
from pandas._period import (
get_period_field_arr,
_validate_end_alias,
_quarter_to_myear,
)
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object, ABCSeries,
is_integer, is_float)
from pandas import compat
from pandas.lib import Timestamp, Timedelta
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
from pandas.compat import zip, u
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self.values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
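# Illustrative sketch of how the factory above is used further down in this
# module (the integer field code here is hypothetical):
#
#     year = _field_accessor('year', 0, "The year of the period")
#
# i.e. each call returns a read-only property that extracts one datetime field
# from the index's ordinal values at its frequency.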
def _get_ordinals(data, freq):
f = lambda x: Period(x, freq=freq).ordinal
if isinstance(data[0], Period):
return period.extract_ordinals(data, freq)
else:
return lib.map_infer(data, f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self.values, opname)
if other.freq != self.freq:
raise AssertionError("Frequencies must be equal")
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
raise AssertionError("Frequencies must be equal")
result = getattr(self.values, opname)(other.values)
mask = (com.mask_missing(self.values, tslib.iNaT) |
com.mask_missing(other.values, tslib.iNaT))
if mask.any():
result[mask] = nat_result
return result
else:
other = Period(other, freq=self.freq)
func = getattr(self.values, opname)
result = func(other.ordinal)
if other.ordinal == tslib.iNaT:
result.fill(nat_result)
mask = self.values == tslib.iNaT
if mask.any():
result[mask] = nat_result
return result
return wrapper
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name','freq']
_datetimelike_ops = ['year','month','day','hour','minute','second',
'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'qyear', 'freq', 'days_in_month', 'daysinmonth']
_is_numeric_dtype = False
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, **kwargs):
freq = frequencies.get_standard_freq(freq)
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=False)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if not isinstance(data, (np.ndarray, PeriodIndex, DatetimeIndex, Int64Index)):
if np.isscalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = com._ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq).ordinal for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = com._ensure_object(data)
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
data = _get_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data.values, base1,
base2, 1)
else:
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
try:
data = com._ensure_int64(data)
except (TypeError, ValueError):
data = com._ensure_object(data)
data = _get_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
result = object.__new__(cls)
result._data = values
result.name = name
result.freq = freq
result._reset_identity()
return result
@property
def _na_value(self):
return self._box_func(tslib.iNaT)
def __contains__(self, key):
if not isinstance(key, Period) or key.freq != self.freq:
if isinstance(key, compat.string_types):
try:
self.get_loc(key)
return True
except Exception:
return False
return False
return key.ordinal in self._engine
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
""" return an array repr of this object, potentially casting to object """
return self.asobject.values
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self.values[mask].searchsorted(where_idx.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.values < self.values[first])] = -1
return result
def _array_values(self):
return self.asobject
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return Index(np.array(list(self), dtype), dtype)
elif dtype == _INT64_DTYPE:
return Index(self.values, dtype)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
def searchsorted(self, key, side='left'):
if isinstance(key, Period):
if key.freq != self.freq:
raise ValueError("Different period frequency: %s" % key.freq)
key = key.ordinal
elif isinstance(key, compat.string_types):
key = Period(key, freq=self.freq).ordinal
return self.values.searchsorted(key, side=side)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
Returns True if there are any missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
@property
def freqstr(self):
return self.freq
def asfreq(self, freq=None, how='E'):
how = _validate_end_alias(how)
freq = frequencies.get_standard_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_data = period.period_asfreq_arr(self.values, base1, base2, end)
return self._simple_new(new_data, self.name, freq=freq)
def to_datetime(self, dayfirst=False):
return self.to_timestamp()
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10, "The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9, "The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11, "The number of days in the month")
daysinmonth = days_in_month
def _get_object_array(self):
freq = self.freq
return np.array([ Period._from_ordinal(ordinal=x, freq=freq) for x in self.values], copy=False)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self._get_object_array()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'int64'):
try:
other = PeriodIndex(other)
except:
return False
return np.array_equal(self.asi8, other.asi8)
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
def _add_delta(self, other):
if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
return self.shift(nanos // offset_nanos)
elif isinstance(other, offsets.DateOffset):
freqstr = frequencies.get_standard_freq(other)
base = frequencies.get_base_alias(freqstr)
if base == self.freq:
return self.shift(other.n)
raise ValueError("Input has different freq from PeriodIndex(freq={0})".format(self.freq))
def shift(self, n):
"""
        Specialized shift which produces a PeriodIndex
        Parameters
        ----------
        n : int
            Number of periods to shift by
Returns
-------
shifted : PeriodIndex
"""
mask = self.values == tslib.iNaT
values = self.values + n
values[mask] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
try:
return _maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies._infer_period_group(reso)
freqn = frequencies._period_group(self.freq)
vals = self.values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self.values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
def get_indexer(self, target, method=None, limit=None):
if hasattr(target, 'freq') and target.freq != self.freq:
raise ValueError('target and index have different freq: '
'(%s, %s)' % (target.freq, self.freq))
return Index.get_indexer(self, target, method, limit)
def get_loc(self, key, method=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
key = Period(key, self.freq)
try:
return Index.get_loc(self, key.ordinal, method=method)
except KeyError:
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string or a datetime, cast it to Period.ordinal according to
resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : string / None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, compat.string_types):
try:
_, parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == 'left' else 1]
except Exception:
raise KeyError(label)
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice',label)
return label
def _parsed_string_to_bounds(self, reso, parsed):
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, second=parsed.second,
freq='S')
else:
            raise KeyError(reso)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
key, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies._infer_period_group(reso)
freqn = frequencies._period_group(self.freq)
if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
raise KeyError(key)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
raise ValueError('Only like-indexed PeriodIndexes compatible '
'for join (for now)')
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex(rawarr, freq=self.freq)
return rawarr
def __getitem__(self, key):
getitem = self._data.__getitem__
if np.isscalar(key):
val = getitem(key)
return Period(ordinal=val, freq=self.freq)
else:
if com.is_bool_indexer(key):
key = np.asarray(key)
result = getitem(key)
if result.ndim > 1:
# MPL kludge
# values = np.asarray(list(values), dtype=object)
# return values.reshape(result.shape)
return PeriodIndex(result, name=self.name, freq=self.freq)
return PeriodIndex(result, name=self.name, freq=self.freq)
def _format_native_types(self, na_rep=u('NaT'), **kwargs):
values = np.array(list(self), dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([u('%s') % dt for dt in values[imask]])
return values.tolist()
def __array_finalize__(self, obj):
if not self.ndim: # pragma: no cover
return self.item()
self.freq = getattr(obj, 'freq', None)
self.name = getattr(obj, 'name', None)
self._reset_identity()
def _format_footer(self):
tagline = 'Length: %d, Freq: %s'
return tagline % (len(self), self.freqstr)
def take(self, indices, axis=None):
"""
Analogous to ndarray.take
"""
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return self._simple_new(taken, self.name, freq=self.freq)
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
if isinstance(to_concat[0], PeriodIndex):
if len(set([x.freq for x in to_concat])) > 1:
# box
to_concat = [x.asobject.values for x in to_concat]
else:
cat_values = np.concatenate([x.values for x in to_concat])
return PeriodIndex(cat_values, freq=self.freq, name=name)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(PeriodIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
try: # backcompat
self.freq = own_state[1]
except:
pass
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(self, state)
self._data = data
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
infer_dst : boolean, default False
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def _get_ordinal_range(start, end, periods, freq):
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify 2 of start, end, periods')
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('Start and end must have same freq')
if ((is_start_per and start.ordinal == tslib.iNaT) or
(is_end_per and end.ordinal == tslib.iNaT)):
raise ValueError('Start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = frequencies.FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if base != frequencies.FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(period.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
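# Broadcast scalar fields (e.g. year=2000) against array-valued fields so they
# can be zipped element-wise in _range_from_fields; mismatched array lengths
# raise ValueError.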
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) for x in fields]
return arrays
def pnow(freq=None):
return Period(datetime.now(), freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
    Return a fixed-frequency PeriodIndex, with day (calendar) as the default
    frequency
Parameters
----------
    start : period-like, optional
        Left bound for generating periods
    end : period-like, optional
        Right bound for generating periods
periods : int, default None
Number of periods in the index
freq : str/DateOffset, default 'D'
Frequency alias
name : str, default None
Name for the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
"""
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
|
gpl-2.0
|
riddhishb/ipython-notebooks
|
Adaboost/adaboost.py
|
1
|
2683
|
import numpy as np
import matplotlib.pyplot as plt
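# Weak learner = decision stump: label a sample +1 when feature `dim` compares
# to threshold `thr` in the direction given by `sign` (>= for sign == 1, < otherwise),
# and -1 elsewhere. Sketch with hypothetical values:
#   weaklearner(0.0, 1, 0, x)  ->  +1 where x[:, 0] >= 0.0, -1 elsewhere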
def weaklearner(thr,sign,dim,x):
if(sign == 1):
y = (x[:,dim] >= thr)
else:
y = (x[:,dim] < thr)
y = y.astype(np.int64)
y[np.where(y==0)] = -1
return y
print "Generating Simulated Data"
# Simulation parameters: boosting rounds T, sample count N, and feature dimension
T = 10
N = 1000
dim = 2
x = np.random.randn(N, 2) # dim=2
s = (N, 1)
# label = np.zeros(s) #linear separation example
label = np.zeros(s) # nonlinear separation example
# for index in range(0,N):
#label[index] = x[index][0] < x[index][1]
for index in range(0, N):
label[index] = (x[index][0]**2 + x[index][1]**2) < 1
label = label * 1.0
pos1 = np.nonzero(label == 1)
pos2 = np.where(label == 0)[0]
label[pos2] = -1
# plots the data
plt.figure()
plt.plot(x[pos1, 0], x[pos1, 1], 'b*')
plt.plot(x[pos2, 0], x[pos2, 1], 'r*')
plt.axis([-3, 3, -3, 3])
plt.legend(['class 1', 'class 2'], loc=2)
plt.title("Simulated (Original) data")
# declare parameters
weight = np.ones(N, dtype = np.float64) / (N)
err = np.ones(T, dtype = np.float64) * np.inf
alpha = np.zeros(T, dtype = np.float64)
h = np.zeros([T,3], dtype = np.float64)
thresholds = np.arange(-3.0, 3.0, 0.1)
print "Training"
for t in range(T):
for thr in thresholds:
for sign in [-1, 1]:
for dim in [0, 1]:
tmpe = np.sum(weight * (weaklearner(thr,sign,dim,x) != label[:,0]).astype(np.int64))
if( tmpe < err[t]):
err[t] = tmpe
h[t,0] = thr
h[t,1] = sign
h[t,2] = dim
    if(err[t] >= 0.5):
        print "weak learner error >= 0.5; stopping boosting early"
        break
alpha[t] = 0.5 * np.log((1-err[t])/err[t])
    # update the sample weights so that wrongly classified samples get more weight
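    # Standard AdaBoost reweighting (what the two lines below compute):
    #   w_i <- w_i * exp(-alpha_t * y_i * h_t(x_i)), then renormalize so the
    #   weights sum to 1 again.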
weight = weight * np.exp(-alpha[t] * label[:,0] * weaklearner(h[t,0],h[t,1],h[t,2],x))
weight = weight / np.sum(weight)
finalLabel = np.zeros_like(label);
finalLabel = finalLabel.astype(np.float64)
misshits = np.zeros(T)
print "Testing"
for t in range(T):
finalLabel[:,0] = finalLabel[:,0] + alpha[t] * weaklearner(h[t,0],h[t,1],h[t,2],x)
tfinalLabel = np.sign(finalLabel[:,0])
misshits[t] = np.sum((tfinalLabel != label[:,0]).astype(np.float64))/N
pos1 = np.where(tfinalLabel == 1)
pos2 = np.where(tfinalLabel == -1)
print "Results"
plt.figure()
plt.plot(x[pos1, 0], x[pos1, 1], 'b*')
plt.plot(x[pos2, 0], x[pos2, 1], 'r*')
plt.axis([-3, 3, -3, 3])
plt.legend(['class 1', 'class 2'], loc=2)
plt.title("Tested (Original) data")
# plot the miss rate as more and more weak learners are used
plt.figure()
plt.plot(misshits)
plt.ylabel('miss rate')
plt.show()
|
gpl-3.0
|
adammenges/statsmodels
|
statsmodels/examples/try_polytrend.py
|
33
|
1477
|
from __future__ import print_function
import numpy as np
#import statsmodels.linear_model.regression as smreg
from scipy import special
import statsmodels.api as sm
from statsmodels.datasets.macrodata import data
dta = data.load()
gdp = np.log(dta.data['realgdp'])
from numpy import polynomial
from scipy import special
maxorder = 20
polybase = special.chebyt
polybase = special.legendre
t = np.linspace(-1,1,len(gdp))
exog = np.column_stack([polybase(i)(t) for i in range(maxorder)])
fitted = [sm.OLS(gdp, exog[:, :maxr]).fit().fittedvalues for maxr in
range(2,maxorder)]
print((np.corrcoef(exog[:,1:6], rowvar=0)*10000).astype(int))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(gdp, 'o')
for i in range(maxorder-2):
plt.plot(fitted[i])
plt.figure()
#plt.plot(gdp, 'o')
for i in range(maxorder-4, maxorder-2):
#plt.figure()
plt.plot(gdp - fitted[i])
plt.title(str(i+2))
plt.figure()
plt.plot(gdp, '.')
plt.plot(fitted[-1], lw=2, color='r')
plt.plot(fitted[0], lw=2, color='g')
plt.title('GDP and Polynomial Trend')
plt.figure()
plt.plot(gdp - fitted[-1], lw=2, color='r')
plt.plot(gdp - fitted[0], lw=2, color='g')
plt.title('Residual GDP minus Polynomial Trend (green: linear, red: legendre(20))')
#orthonormalize an exog using QR
ex2 = t[:,None]**np.arange(6) #np.vander has columns reversed
q2,r2 = np.linalg.qr(ex2, mode='full')
np.max(np.abs(np.dot(q2.T, q2)-np.eye(6)))
plt.figure()
plt.plot(q2, lw=2)
plt.show()
|
bsd-3-clause
|
CCI-Tools/cate-core
|
tests/ops/test_data_frame.py
|
1
|
13782
|
from unittest import TestCase
import geopandas as gpd
import numpy as np
import pandas as pd
import shapely.geometry
import shapely.wkt
from shapely.geometry import Point
from cate.core.types import ValidationError
from cate.core.types import GeoDataFrameProxy
from cate.ops.data_frame import data_frame_min, data_frame_max, data_frame_query, data_frame_find_closest, \
great_circle_distance, data_frame_aggregate, data_frame_subset
test_point = 'POINT (597842.4375881671 5519903.13366397)'
test_poly_4326 = 'POLYGON ((-80 -40, -70 -40, ' \
'-70 -45, -80 -45, ' \
'-80 -40))'
class TestDataFrameOps(TestCase):
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': ['a', 'b', 'c', 'x', 'y', 'z'],
'C': [False, False, True, False, True, True],
'D': [0.4, 0.5, 0.3, 0.3, 0.1, 0.4]})
gdf = gpd.GeoDataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': ['a', 'b', 'c', 'x', 'y', 'z'],
'C': [False, False, True, False, True, True],
'D': [0.4, 0.5, 0.3, 0.3, 0.1, 0.4],
'geometry': gpd.GeoSeries([
shapely.wkt.loads('POINT(10 10)'),
shapely.wkt.loads('POINT(10 20)'),
shapely.wkt.loads('POINT(10 30)'),
shapely.wkt.loads('POINT(20 30)'),
shapely.wkt.loads('POINT(20 20)'),
shapely.wkt.loads('POINT(20 10)'),
])})
gdf_32718 = gpd.GeoDataFrame({'A': [1]},
crs={'init': 'epsg:32718'},
geometry=[shapely.wkt.loads(test_point)])
test_region_4326 = shapely.wkt.loads(test_poly_4326)
gdfp = GeoDataFrameProxy.from_features(gdf.__geo_interface__['features'])
def test_data_frame_min(self):
df2 = data_frame_min(TestDataFrameOps.df, 'D')
self.assertIsInstance(df2, pd.DataFrame)
self.assertEqual(len(df2), 1)
self.assertEqual(list(df2.columns), ['A', 'B', 'C', 'D'])
self.assertEqual(df2.iloc[0, 0], 5)
self.assertEqual(df2.iloc[0, 1], 'y')
self.assertEqual(df2.iloc[0, 2], True)
self.assertEqual(df2.iloc[0, 3], 0.1)
def test_data_frame_max(self):
df2 = data_frame_max(TestDataFrameOps.df, 'D')
self.assertIsInstance(df2, pd.DataFrame)
self.assertEqual(len(df2), 1)
self.assertEqual(list(df2.columns), ['A', 'B', 'C', 'D'])
self.assertEqual(df2.iloc[0, 0], 2)
self.assertEqual(df2.iloc[0, 1], 'b')
self.assertEqual(df2.iloc[0, 2], False)
self.assertEqual(df2.iloc[0, 3], 0.5)
def test_data_frame_query(self):
df2 = data_frame_query(TestDataFrameOps.df, "D >= 0.4 and B != 'b'")
self.assertIsInstance(df2, pd.DataFrame)
self.assertEqual(len(df2), 2)
self.assertEqual(list(df2.columns), ['A', 'B', 'C', 'D'])
self.assertEqual(df2.iloc[0, 0], 1)
self.assertEqual(df2.iloc[0, 1], 'a')
self.assertEqual(df2.iloc[0, 2], False)
self.assertEqual(df2.iloc[0, 3], 0.4)
self.assertEqual(df2.iloc[1, 0], 6)
self.assertEqual(df2.iloc[1, 1], 'z')
self.assertEqual(df2.iloc[1, 2], True)
self.assertEqual(df2.iloc[1, 3], 0.4)
def test_data_frame_query_with_geom(self):
self._test_data_frame_query_with_geom(TestDataFrameOps.gdf)
# Skipped due to new behaviour of from_features
# self._test_data_frame_query_with_geom(TestDataFrameOps.gdfp)
def _test_data_frame_query_with_geom(self, gdf):
df2 = data_frame_query(gdf, "not C and @almost_equals('10,10')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 1)
df2 = data_frame_query(gdf, "not C and @contains('10,10')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 1)
df2 = data_frame_query(gdf, "not C and @crosses('10,10')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 0)
df2 = data_frame_query(gdf, "not C and @disjoint('10,10')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 2)
df2 = data_frame_query(gdf, "not C and @intersects('19, 9, 21, 31')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 1)
df2 = data_frame_query(gdf, "not C and @touches('10, 10, 20, 30')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 3)
df2 = data_frame_query(gdf, "@within('19, 9, 21, 31')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 3)
self.assertEqual(list(df2.columns), ['A', 'B', 'C', 'D', 'geometry'])
self.assertEqual(df2.iloc[0, 0], 4)
self.assertEqual(df2.iloc[1, 0], 5)
self.assertEqual(df2.iloc[2, 0], 6)
self.assertEqual(df2.iloc[0, 1], 'x')
self.assertEqual(df2.iloc[1, 1], 'y')
self.assertEqual(df2.iloc[2, 1], 'z')
self.assertEqual(df2.iloc[0, 2], False)
self.assertEqual(df2.iloc[1, 2], True)
self.assertEqual(df2.iloc[2, 2], True)
self.assertEqual(df2.iloc[0, 3], 0.3)
self.assertEqual(df2.iloc[1, 3], 0.1)
self.assertEqual(df2.iloc[2, 3], 0.4)
df2 = data_frame_query(gdf, "not C and @within('19, 9, 21, 31')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 1)
self.assertEqual(list(df2.columns), ['A', 'B', 'C', 'D', 'geometry'])
self.assertEqual(df2.iloc[0, 0], 4)
self.assertEqual(df2.iloc[0, 1], 'x')
self.assertEqual(df2.iloc[0, 2], False)
self.assertEqual(df2.iloc[0, 3], 0.3)
df2 = data_frame_query(gdf, "not C and geometry.within(@from_wkt('19, 9, 21, 31'))")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 1)
self.assertEqual(list(df2.columns), ['A', 'B', 'C', 'D', 'geometry'])
self.assertEqual(df2.iloc[0, 0], 4)
self.assertEqual(df2.iloc[0, 1], 'x')
self.assertEqual(df2.iloc[0, 2], False)
self.assertEqual(df2.iloc[0, 3], 0.3)
def test_data_frame_subset(self):
df2 = data_frame_subset(TestDataFrameOps.gdf,
region='POLYGON((-10 0, 25 0, 25 30, -10 0))')
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 3)
self.assertIn('A', df2)
self.assertIn('B', df2)
self.assertIn('C', df2)
self.assertIn('D', df2)
self.assertIn('geometry', df2)
df2 = data_frame_subset(TestDataFrameOps.gdf,
var_names="A,C",
region='POLYGON((-10 0, 25 0, 25 30, -10 0))')
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 3)
self.assertIn('A', df2)
self.assertNotIn('B', df2)
self.assertIn('C', df2)
self.assertNotIn('D', df2)
self.assertIn('geometry', df2)
df2 = data_frame_subset(TestDataFrameOps.gdf,
var_names="A,C",
region='POLYGON((30 30, 40 30, 40 40, 30 30))')
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 0)
def test_data_frame_failures(self):
df2 = data_frame_query(TestDataFrameOps.gdf_32718, "@within('" + test_poly_4326 + "')")
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 1)
df2 = data_frame_subset(TestDataFrameOps.gdf_32718,
var_names='A',
region=TestDataFrameOps.test_region_4326)
self.assertEqual(len(df2), 1)
def test_data_frame_find_closest(self):
df2 = data_frame_find_closest(TestDataFrameOps.gdf, 'POINT(20 30)',
dist_col_name='dist')
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 1)
self.assertIn('A', df2)
self.assertIn('B', df2)
self.assertIn('C', df2)
self.assertIn('D', df2)
self.assertIn('geometry', df2)
self.assertIn('dist', df2)
self.assertEqual(1, len(df2['dist']))
self.assertEqual(0.0, df2['dist'].iloc[0])
self.assertEqual(shapely.wkt.loads('POINT(20 30)'), df2['geometry'].iloc[0])
df2 = data_frame_find_closest(TestDataFrameOps.gdf, 'POINT(21 28)',
max_results=3, dist_col_name='dist')
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 3)
self.assertIn('A', df2)
self.assertIn('B', df2)
self.assertIn('C', df2)
self.assertIn('D', df2)
self.assertIn('geometry', df2)
self.assertIn('dist', df2)
np.testing.assert_approx_equal(2.1828435, df2['dist'].iloc[0])
np.testing.assert_approx_equal(8.0518568, df2['dist'].iloc[1])
np.testing.assert_approx_equal(9.8221713, df2['dist'].iloc[2])
self.assertEqual(shapely.wkt.loads('POINT(20 30)'), df2['geometry'].iloc[0])
self.assertEqual(shapely.wkt.loads('POINT(20 20)'), df2['geometry'].iloc[1])
self.assertEqual(shapely.wkt.loads('POINT(10 30)'), df2['geometry'].iloc[2])
df2 = data_frame_find_closest(TestDataFrameOps.gdf, 'POINT(21 28)',
max_dist=9.0, max_results=3, dist_col_name='dist')
self.assertIsInstance(df2, gpd.GeoDataFrame)
self.assertEqual(len(df2), 2)
self.assertIn('A', df2)
self.assertIn('B', df2)
self.assertIn('C', df2)
self.assertIn('D', df2)
self.assertIn('geometry', df2)
self.assertIn('dist', df2)
np.testing.assert_approx_equal(2.1828435, df2['dist'].iloc[0])
np.testing.assert_approx_equal(8.0518568, df2['dist'].iloc[1])
self.assertEqual(shapely.wkt.loads('POINT(20 30)'), df2['geometry'].iloc[0])
self.assertEqual(shapely.wkt.loads('POINT(20 20)'), df2['geometry'].iloc[1])
def test_data_frame_aggregate(self):
# Generate mock data
data = {'name': ['A', 'B', 'C'],
'lat': [45, 46, 47.5],
'lon': [-120, -121.2, -122.9]}
df = pd.DataFrame(data)
# needs to be a copy
gdf_empty_geo = gpd.GeoDataFrame(df).copy()
gdf = gpd.GeoDataFrame(df, geometry=[Point(xy) for xy in zip(df['lon'], df['lat'])])
var_names_not_agg = 'name, lat, lon'
var_names_not_in = 'asdc, lat, lon'
var_names_valid = ['lat', 'lon']
aggregations = ["count", "mean", "median", "sum", "std", "min", "max"]
# Assert that a Validation exception is thrown if the df is None
with self.assertRaises(ValidationError):
data_frame_aggregate(df=None)
# Assert that a Validation exception is thrown if the var_names contain non-existing fields in the df
with self.assertRaises(ValidationError):
data_frame_aggregate(df=df, var_names=var_names_not_in)
# Assert that a Validation exception is thrown if the var_names contain non-aggregatable fields
with self.assertRaises(ValidationError):
data_frame_aggregate(df=df, var_names=var_names_not_agg)
# Assert that a Validation exception is thrown if the GeoDataFrame does not have a geometry
with self.assertRaises(ValidationError):
data_frame_aggregate(df=gdf_empty_geo, var_names=None)
with self.assertRaises(ValidationError):
data_frame_aggregate(df=gdf_empty_geo, var_names='lat')
        # assert that the input and output types for df are the same
rdf = data_frame_aggregate(df=gdf, var_names=var_names_valid)
self.assertEqual(len(rdf), 1)
        # assert that aggregated columns are returned if var_names is None for a DataFrame
rdf = data_frame_aggregate(df=df, var_names=None)
self.assertEqual(len(rdf.columns), len(aggregations) * len(var_names_valid))
        # assert that aggregated columns are returned if var_names is None for a GeoDataFrame
rdf = data_frame_aggregate(df=gdf, var_names=None, aggregate_geometry=True)
self.assertEqual(len(rdf.columns), len(aggregations) * len(var_names_valid) + 1)
# assert that geometry union is created
rdf = data_frame_aggregate(df=gdf, var_names=var_names_valid, aggregate_geometry=True)
self.assertIsNotNone(rdf.geometry)
class GreatCircleDistanceTest(TestCase):
def test_great_circle_distance(self):
dist = great_circle_distance(Point(20, 20), Point(20, 20))
self.assertIsNotNone(dist)
np.testing.assert_approx_equal(0.0, dist)
dist = great_circle_distance(Point(20, 0), Point(20, 30))
np.testing.assert_approx_equal(30.0, dist)
dist = great_circle_distance(Point(-20, 0), Point(20, 0))
np.testing.assert_approx_equal(40.0, dist)
dist = great_circle_distance(Point(-155, 0), Point(155, 0))
np.testing.assert_approx_equal(50.0, dist)
dist = great_circle_distance(Point(0, 0), Point(0, 90))
np.testing.assert_approx_equal(90.0, dist)
dist = great_circle_distance(Point(0, -90), Point(0, 90))
np.testing.assert_approx_equal(180.0, dist)
dist = great_circle_distance(Point(0, 180), Point(0, 0))
np.testing.assert_approx_equal(180.0, dist)
dist = great_circle_distance(Point(0, 0), Point(1, 1))
np.testing.assert_approx_equal(1.4141777, dist)
|
mit
|
DonBeo/scikit-learn
|
examples/datasets/plot_random_multilabel_dataset.py
|
93
|
3460
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
  1     2     3   Color
===== ===== ===== ======
  Y     N     N   Red
  N     Y     N   Blue
  N     N     Y   Yellow
  Y     Y     N   Purple
  Y     N     Y   Orange
  N     Y     Y   Green
  Y     Y     Y   Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
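# plot_2d draws one panel: samples are colored by their label combination
# (bit-encoding the indicator matrix with weights 1, 2, 4), and a star marks
# the expected sample for each class (p_w_c * length), with size growing with
# the class prior p_c.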
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_indicator=True,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
|
bsd-3-clause
|
hugobowne/scikit-learn
|
examples/mixture/plot_gmm_selection.py
|
36
|
3271
|
"""
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In this example, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
|
bsd-3-clause
|
cytomine/Cytomine-python-datamining
|
cytomine-datamining/algorithms/sldc/examples/with_pyxit/add_and_run_job.py
|
1
|
7783
|
import tempfile
from argparse import ArgumentParser
import os
import cv2
import numpy as np
from cytomine import Cytomine
from cytomine.models import AlgoAnnotationTerm
from cytomine_sldc import CytomineSlide, CytomineTileBuilder
from shapely.affinity import affine_transform, translate
from sklearn.utils import check_random_state
from sldc import DispatchingRule, ImageWindow, Loggable, Logger, Segmenter, StandardOutputLogger, WorkflowBuilder
from cytomine_utilities import CytomineJob
from pyxit_classifier import PyxitClassifierAdapter
def _upload_annotation(cytomine, img_inst, polygon, label=None, proba=1.0):
"""Upload an annotation and its term (if provided)"""
image_id = img_inst.id
# Transform polygon to match cytomine (bottom-left) origin point
polygon = affine_transform(polygon, [1, 0, 0, -1, 0, img_inst.height])
annotation = cytomine.add_annotation(polygon.wkt, image_id)
if label is not None and annotation is not None:
cytomine.add_annotation_term(annotation.id, label, label, proba, annotation_term_model=AlgoAnnotationTerm)
class DemoSegmenter(Segmenter):
def __init__(self, threshold):
"""A simple segmenter that performs a simple thresholding on the Green channel of the image"""
self._threshold = threshold
def segment(self, image):
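        # Sketch of the intent: mark pixels whose green channel falls below the
        # threshold as foreground, then clean the binary mask with a morphological
        # close followed by an open (7x7 elliptical kernel) before scaling to 0/255.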
mask = np.array(image[:, :, 1] < self._threshold).astype(np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
return mask * 255
class ValidAreaRule(DispatchingRule):
def __init__(self, min_area):
"""A rule which matches polygons of which the area is greater than min_area"""
super(ValidAreaRule, self).__init__()
self._min_area = min_area
def evaluate(self, image, polygon):
return self._min_area < polygon.area
class DemoJob(CytomineJob, Loggable):
def __init__(self, cytomine, software_id, project_id, job_parameters,
tile_overlap, tile_width, tile_height, n_jobs, threshold,
min_area, model_path, rseed, working_path):
"""An example job implementing an sldc workflow.
Parameters
----------
cytomine: Cytomine
Cytomine client
software_id: int
Cytomine software id
project_id: int
Cytomine project id
job_parameters: dict
Job parameters
tile_overlap: int
Number of pixel of overlap between the tiles
tile_width: int
Maximum width of the tiles
tile_height: int
Maximum height of the tiles
n_jobs: int
Number of available jobs
threshold: int
Segmentation threshold in [0, 255]
min_area: int
Minimum area of the valid objects in pixel squared
model_path: str
Path to the pickled pyxit model
rseed: int
Random seed
working_path: str
Working path of the workflow (for temporary files)
"""
CytomineJob.__init__(self, cytomine, software_id, project_id, parameters=job_parameters)
Loggable.__init__(self, logger=StandardOutputLogger(Logger.INFO))
self._cytomine = cytomine
# create workflow component
random_state = check_random_state(rseed)
tile_builder = CytomineTileBuilder(cytomine, working_path=working_path)
segmenter = DemoSegmenter(threshold)
area_rule = ValidAreaRule(min_area)
classifier = PyxitClassifierAdapter.build_from_pickle(
model_path, tile_builder, self.logger,
random_state=random_state,
n_jobs=n_jobs,
working_path=working_path
)
builder = WorkflowBuilder()
builder.set_n_jobs(n_jobs)
builder.set_logger(self.logger)
builder.set_overlap(tile_overlap)
builder.set_tile_size(tile_width, tile_height)
builder.set_tile_builder(tile_builder)
builder.set_segmenter(segmenter)
builder.add_classifier(area_rule, classifier, dispatching_label="valid")
self._workflow = builder.get()
def run(self, slide):
"""Run the workflow on the given image and upload the results to cytomine"""
results = self._workflow.process(slide)
# Upload results
for polygon, dispatch, cls, proba in results:
if cls is not None:
# if image is a window, the polygon must be translated
if isinstance(slide, ImageWindow):
polygon = translate(polygon, slide.abs_offset_x, slide.abs_offset_y)
# actually upload the annotation
_upload_annotation(
self._cytomine,
slide.image_instance,
polygon,
label=cls,
proba=proba
)
return results
def main(argv):
parser = ArgumentParser(prog="Demo_SLDC_Workflow_With_Pyxit", description="Demo software for SLDC Workflow on Cytomine")
parser.add_argument('--cytomine_host', dest="cytomine_host", default='demo.cytomine.be')
parser.add_argument('--cytomine_public_key', dest="cytomine_public_key")
parser.add_argument('--cytomine_private_key', dest="cytomine_private_key")
parser.add_argument('--cytomine_base_path', dest="cytomine_base_path", default='/api/')
default_working_path = os.path.join(tempfile.gettempdir(), "cytomine")
parser.add_argument('--cytomine_working_path', dest="cytomine_working_path", default=default_working_path)
parser.add_argument('--cytomine_id_software', dest="cytomine_id_software", type=int)
parser.add_argument("--cytomine_id_project", dest="cytomine_id_project", type=int)
parser.add_argument("--cytomine_id_image", dest="cytomine_id_image", type=int)
parser.add_argument("--sldc_tile_overlap", dest="sldc_tile_overlap", type=int, default=10)
parser.add_argument("--sldc_tile_width", dest="sldc_tile_width", type=int, default=768)
parser.add_argument("--sldc_tile_height", dest="sldc_tile_height", type=int, default=768)
parser.add_argument("--pyxit_model_path", dest="pyxit_model_path")
parser.add_argument("--n_jobs", dest="n_jobs", type=int, default=1)
parser.add_argument("--min_area", dest="min_area", type=int, default=500)
parser.add_argument("--threshold", dest="threshold", type=int, default=215)
parser.add_argument("--rseed", dest="rseed", type=int, default=0)
default_workflow_wpath = os.path.join(tempfile.gettempdir(), "sldc")
parser.add_argument("--working_path", dest="working_path", default=default_workflow_wpath)
params, other = parser.parse_known_args(argv)
# Initialize cytomine client
cytomine = Cytomine(
params.cytomine_host,
params.cytomine_public_key,
params.cytomine_private_key,
working_path=params.cytomine_working_path,
base_path=params.cytomine_base_path
)
if not os.path.exists(params.working_path):
os.makedirs(params.working_path)
if not os.path.exists(params.cytomine_working_path):
os.makedirs(params.cytomine_working_path)
with DemoJob(cytomine, params.cytomine_id_software, params.cytomine_id_project, params.__dict__,
params.sldc_tile_overlap, params.sldc_tile_width, params.sldc_tile_height, params.n_jobs,
params.threshold, params.min_area, params.pyxit_model_path, params.rseed, params.working_path) as job:
slide = CytomineSlide(cytomine, params.cytomine_id_image)
job.run(slide)
if __name__ == "__main__":
import sys
main(sys.argv[1:])
|
apache-2.0
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Simulaters/Brianer/draft/01_ExampleDoc copy 2.py
|
2
|
2779
|
#ImportModules
import ShareYourSystem as SYS
import operator
#Definition
MyBrianer=SYS.BrianerClass(
).produce(
"Neurongroupers",
['E','I'],
SYS.NeurongrouperClass,
#Here are defined the brian classic shared arguments for each pop
{
'NeurongroupingKwargVariablesDict':
{
'model':
'''
dv/dt = (ge+gi-(v+49*mV))/(20*ms) : volt
dge/dt = -ge/(5*ms) : volt
dgi/dt = -gi/(10*ms) : volt
''',
'threshold':'v>-50*mV',
'reset':'v=-60*mV'
},
'produce':
{
'LiargVariablesList':
[
"SpikeMoniters",
['Spike'],
SYS.MoniterClass
]
}
}
).__setitem__(
'Dis_<Neurongroupers>',
#Here are defined the brian classic specific arguments for each pop
[
{
'PopulatingUnitsInt':3200,
'ConnectingGraspClueVariablesList':
map(
lambda __PrefixStr:
SYS.GraspDictClass(
{
'HintVariable':'/NodePointDeriveNoder/<Neurongroupers>'+__PrefixStr+'Neurongrouper',
'SynapsingKwargVariablesDict':
{
'pre':'ge+=1.62*mV',
},
'SynapsingProbabilityVariable':0.02
}
),
['E','I']
)
},
{
'PopulatingUnitsInt':800,
'ConnectingGraspClueVariablesList':
map(
lambda __PrefixStr:
SYS.GraspDictClass(
{
'HintVariable':'/NodePointDeriveNoder/<Neurongroupers>'+__PrefixStr+'Neurongrouper',
'SynapsingKwargVariablesDict':
{
'pre':'gi-=9*mV'
},
'SynapsingProbabilityVariable':0.02
}
),
['E','I']
)
}
]
).network(
**{
'RecruitingConcludeConditionVariable':[
(
'MroClassesList',
operator.contains,
SYS.NeurongrouperClass
)
]
}
).brian()
'''
#Definition the AttestedStr
SYS._attest(
[
'MyBrianer is '+SYS._str(
MyBrianer,
**{
'RepresentingBaseKeyStrsList':False,
'RepresentingAlineaIsBool':False
}
),
]
)
'''
'''
SYS._print(
MyBrianer.BrianedNeuronGroupsList
)
'''
'''
SYS._print(
MyBrianer.BrianedSynapsesList
)
'''
SYS._print(
[
MyBrianer.BrianedSynapsesList[0].source,
MyBrianer.BrianedSynapsesList[0].target,
MyBrianer.BrianedSynapsesList[0].pre,
MyBrianer.BrianedSynapsesList[1].source,
MyBrianer.BrianedSynapsesList[1].target,
MyBrianer.BrianedSynapsesList[1].pre
]
)
'''
SYS._print(
MyBrianer.BrianedSpikeMonitorsList
)
SYS._print(
MyBrianer.BrianedStateMonitorsList
)
'''
#init
import brian2
map(
lambda __BrianedNeuronGroup:
__BrianedNeuronGroup.__setattr__(
'v',
-60*brian2.mV
),
MyBrianer.BrianedNeuronGroupsList
)
#run
MyBrianer.run(1000)
#plot
M=MyBrianer['<Neurongroupers>ENeurongrouper']['<SpikeMoniters>SpikeMoniter'].SpikeMonitor
from matplotlib import pyplot
pyplot.plot(M.t/brian2.ms, M.i, '.')
pyplot.show()
#Print
|
mit
|
macks22/scikit-learn
|
examples/plot_multilabel.py
|
236
|
4157
|
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
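    # Project X to two dimensions (unsupervised PCA or supervised CCA), fit a
    # one-vs-rest linear SVC, then scatter the samples and draw both per-class
    # decision boundaries with plot_hyperplane.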
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
|
bsd-3-clause
|
RebeccaRapp/Bercs-Bees
|
beez.py
|
1
|
4668
|
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.gridspec as gridspec
def cube(length):
#Constructs a cube with side 'length'
vertices = [[0,length,0],[0,0,0],[0,0,length],[0,length,length],[0,length,0],[length,length,0],[length,0,0],[length,0,length],[length,length,length],[0,length,length],[length,length,length],[length,length,0],[length,0,0],[0,0,0],[0,0,length],[length,0,length]]
x = []
y = []
z = []
for i in vertices:
x.append(i[0])
y.append(i[1])
z.append(i[2])
return x,y,z
def edge(pos,length,velocity):
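    # Reflect the bee off the walls of the cube: flip the velocity component
    # normal to the wall and clamp the position back onto the wall. (The
    # commented-out lines would instead redraw a random speed on each bounce.)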
if pos[0]>length:
velocity[0] = -1*velocity[0]
#velocity[0] = -np.random.random()
pos[0] = length
if pos[0]<0:
velocity[0] = -1*velocity[0]
#velocity[0] = np.random.random()
pos[0] = 0.
if pos[1]>length:
velocity[1] = -1*velocity[1]
#velocity[1] = -np.random.random()
pos[1] = length
if pos[1]<0:
velocity[1] = -1*velocity[1]
#velocity[1] = np.random.random()
pos[1] = 0.
if pos[2]>length:
velocity[2] = -1*velocity[2]
#velocity[2] = -np.random.random()
pos[2] = length
if pos[2]<0:
velocity[2] = -1*velocity[2]
#velocity[2] = np.random.random()
pos[2] = 0.
return velocity,pos
def plotify(listofpoints):
#Takes a list of ordered triplets and converts them into lists of x's, y's, and z's. ie - matches the matplotlib formatting.
x = []
y = []
z = []
for point in listofpoints:
x.append(point[0])
y.append(point[1])
z.append(point[2])
return x,y,z
def mag(vec):
#Calculates the magnitude of a vector.
mag = 0.
for element in vec:
mag += float(element)**2
return math.sqrt(mag)
def bee(duration,dt,length,velocity = 6.70558333):
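    # Simulate a bee flying at constant speed inside a cube of side 'length':
    # each step a sinusoidal acceleration nudges the direction, the speed is
    # renormalized back to 'velocity', and wall hits are handled by edge().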
v = [np.random.random(),np.random.random(),np.random.random()]
s = mag(v)
scale = velocity/s
v = np.dot(scale,v)
t = 0.
x = [2*np.random.random()]
y = [2*np.random.random()]
z = [2*np.random.random()]
while t <= duration:
accel = [0, 32*np.sin(t), 32*np.cos(t)]
v = [v[i] + accel[i]*dt for i in range(len(v))]
s = mag(v)
scale = velocity/s
v = np.dot(scale,v)
nextx = x[-1]+v[0]*dt
nexty = y[-1]+v[1]*dt
nextz = z[-1]+v[2]*dt
if nextx >= length or nextx <= 0. or nexty >= length or nexty <= 0. or nextz >= length or nextz <= 0.:
v,pos = edge([nextx,nexty,nextz],length,v)
nextx = pos[0]
nexty = pos[1]
nextz = pos[2]
x.append(nextx)
y.append(nexty)
z.append(nextz)
t+=dt
return x,y,z
length = 2.
time = 4.
dt = 0.005 #5 mS -> 50mS to have microphone consistency
#dt = 0.001
cubex, cubey, cubez = cube(length)
mikes = [[0-0.01,0-0.01,0-0.01],[0-0.01,0-0.01,length+0.01],[0-0.01,length+0.01,0-0.01],[length+0.01,0-0.01,0-0.01],[length+0.01,length+.01,0-0.01],[0-0.01,length+0.01,length+0.01],[length+0.01,0-0.01,length+0.01],[length+0.01,length+0.01,length+0.01],[length/2.,0-0.01,0-0.01],[0-0.01,length/2.,0-0.01],[0-0.01,0-0.01,length/2.],[length+0.01,length/2.,0-0.01],[length,0-0.01,length/2.],[length+0.01,length+0.01,length/2.],[length+0.01,length/2.,length+0.01],[length/2.,length+0.01,length+0.01],[0-0.01,length+0.01,length/2.],[0-0.01,length/2.,length+0.01],[length/2.,0.-0.01,length+0.01],[length/2.,length+0.01,0.-0.01]]
mikex, mikey, mikez = plotify(mikes)
def genpath(duration,dt,length,strfile):
    beex,beey,beez = bee(duration,dt,length)
params = open('params.txt','w')
params.write(str(duration)+'\n')
params.write(str(length)+'\n')
params.write(str(dt)+'\n')
params.close()
flight = open(strfile,'w')
for i in range(len(beex)):
flight.write(str(beex[i])[:7]+'\t'+str(beey[i])[:7]+'\t'+str(beez[i])[:7]+'\n')
flight.close()
return beex,beey,beez
beex,beey,beez = genpath(time,dt,length,'PathFiles/beepath.txt')
fig = plt.figure(facecolor = 'white',figsize = (12,12))
ax = fig.add_subplot(111,projection = '3d')
ax.set_aspect('equal')
ax.plot(cubex,cubey,cubez,color = 'red')
ax.scatter(beex,beey,beez,color = 'blue')
ax.set_xlim(-0.02,length+0.02)
ax.set_ylim(-0.02,length+0.02)
ax.set_zlim(-0.02,length+0.02)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.scatter(mikex,mikey,mikez,color = 'green', s = 50)
fig.savefig('Tracked/box.png')
#plt.show()
|
gpl-3.0
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/userdemo/annotate_text_arrow.py
|
1
|
1701
|
"""
===================
Annotate Text Arrow
===================
"""
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig, ax = plt.subplots(figsize=(5, 5))
ax.set_aspect(1)
x1 = -1 + np.random.randn(100)
y1 = -1 + np.random.randn(100)
x2 = 1. + np.random.randn(100)
y2 = 1. + np.random.randn(100)
ax.scatter(x1, y1, color="r")
ax.scatter(x2, y2, color="g")
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
ax.text(-2, -2, "Sample A", ha="center", va="center", size=20,
bbox=bbox_props)
ax.text(2, 2, "Sample B", ha="center", va="center", size=20,
bbox=bbox_props)
bbox_props = dict(boxstyle="rarrow", fc=(0.8, 0.9, 0.9), ec="b", lw=2)
t = ax.text(0, 0, "Direction", ha="center", va="center", rotation=45,
size=15,
bbox=bbox_props)
bb = t.get_bbox_patch()
bb.set_boxstyle("rarrow", pad=0.6)
ax.set_xlim(-4, 4)
ax.set_ylim(-4, 4)
pltshow(plt)
|
mit
|
nickvandewiele/RMG-Py
|
rmgpy/cantherm/thermo.py
|
6
|
11890
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains the :class:`ThermoJob` class, used to compute and save the
thermodynamics information for a single species.
"""
import os.path
import math
import numpy.linalg
import logging
import rmgpy.constants as constants
from rmgpy.cantherm.output import prettify
from rmgpy.statmech.translation import Translation, IdealGasTranslation
from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor
from rmgpy.statmech.vibration import Vibration, HarmonicOscillator
from rmgpy.statmech.torsion import Torsion, HinderedRotor
from rmgpy.statmech.conformer import Conformer
from rmgpy.thermo.thermodata import ThermoData
from rmgpy.thermo.nasa import NASAPolynomial, NASA
from rmgpy.thermo.wilhoit import Wilhoit
################################################################################
class ThermoJob:
"""
A representation of a CanTherm thermodynamics job. This job is used to
compute and save the thermodynamics information for a single species.
"""
def __init__(self, species, thermoClass):
self.species = species
self.thermoClass = thermoClass
def execute(self, outputFile=None, plot=False):
"""
Execute the thermodynamics job, saving the results to the
given `outputFile` on disk.
"""
self.generateThermo()
if outputFile is not None:
self.save(outputFile)
if plot:
self.plot(os.path.dirname(outputFile))
def generateThermo(self):
"""
Generate the thermodynamic data for the species and fit it to the
desired heat capacity model (as specified in the `thermoClass`
attribute).
"""
if self.thermoClass.lower() not in ['wilhoit', 'nasa']:
raise Exception('Unknown thermodynamic model "{0}".'.format(self.thermoClass))
species = self.species
logging.info('Generating {0} thermo model for {1}...'.format(self.thermoClass, species))
Tlist = numpy.arange(10.0, 3001.0, 10.0, numpy.float64)
Cplist = numpy.zeros_like(Tlist)
H298 = 0.0
S298 = 0.0
conformer = self.species.conformer
for i in range(Tlist.shape[0]):
Cplist[i] += conformer.getHeatCapacity(Tlist[i])
H298 += conformer.getEnthalpy(298.) + conformer.E0.value_si
S298 += conformer.getEntropy(298.)
if not any([isinstance(mode, (LinearRotor, NonlinearRotor)) for mode in conformer.modes]):
# Monatomic species
linear = False
Nfreq = 0
Nrotors = 0
Cp0 = 2.5 * constants.R
CpInf = 2.5 * constants.R
else:
# Polyatomic species
linear = True if isinstance(conformer.modes[1], LinearRotor) else False
Nfreq = len(conformer.modes[2].frequencies.value)
Nrotors = len(conformer.modes[3:])
Cp0 = (3.5 if linear else 4.0) * constants.R
CpInf = Cp0 + (Nfreq + 0.5 * Nrotors) * constants.R
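            # Classical high-temperature limit: each vibration contributes R
            # and each hindered rotor R/2 on top of the rigid-body Cp0.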
wilhoit = Wilhoit()
if Nfreq == 0 and Nrotors == 0:
wilhoit.Cp0 = (Cplist[0],"J/(mol*K)")
wilhoit.CpInf = (Cplist[0],"J/(mol*K)")
wilhoit.B = (500.,"K")
wilhoit.H0 = (0.0,"J/mol")
wilhoit.S0 = (0.0,"J/(mol*K)")
            wilhoit.H0 = (H298 - wilhoit.getEnthalpy(298.15), "J/mol")
wilhoit.S0 = (S298 - wilhoit.getEntropy(298.15),"J/(mol*K)")
else:
wilhoit.fitToData(Tlist, Cplist, Cp0, CpInf, H298, S298, B0=500.0)
if self.thermoClass.lower() == 'nasa':
species.thermo = wilhoit.toNASA(Tmin=10.0, Tmax=3000.0, Tint=500.0)
else:
species.thermo = wilhoit
def save(self, outputFile):
"""
Save the results of the thermodynamics job to the file located
at `path` on disk.
"""
species = self.species
logging.info('Saving thermo for {0}...'.format(species.label))
f = open(outputFile, 'a')
f.write('# Thermodynamics for {0}:\n'.format(species.label))
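        # getEnthalpy/getEntropy return SI units (J/mol and J/(mol*K)); divide
        # by 4184 and 4.184 to report kcal/mol and cal/(mol*K) in the table.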
H298 = species.thermo.getEnthalpy(298) / 4184.
S298 = species.thermo.getEntropy(298) / 4.184
f.write('# Enthalpy of formation (298 K) = {0:9.3f} kcal/mol\n'.format(H298))
f.write('# Entropy of formation (298 K) = {0:9.3f} cal/(mol*K)\n'.format(S298))
f.write('# =========== =========== =========== =========== ===========\n')
f.write('# Temperature Heat cap. Enthalpy Entropy Free energy\n')
f.write('# (K) (cal/mol*K) (kcal/mol) (cal/mol*K) (kcal/mol)\n')
f.write('# =========== =========== =========== =========== ===========\n')
for T in [300,400,500,600,800,1000,1500,2000,2400]:
Cp = species.thermo.getHeatCapacity(T) / 4.184
H = species.thermo.getEnthalpy(T) / 4184.
S = species.thermo.getEntropy(T) / 4.184
G = species.thermo.getFreeEnergy(T) / 4184.
f.write('# {0:11g} {1:11.3f} {2:11.3f} {3:11.3f} {4:11.3f}\n'.format(T, Cp, H, S, G))
f.write('# =========== =========== =========== =========== ===========\n')
string = 'thermo(label={0!r}, thermo={1!r})'.format(species.label, species.thermo)
f.write('{0}\n\n'.format(prettify(string)))
f.close()
f = open(os.path.join(os.path.dirname(outputFile), 'chem.inp'), 'a')
thermo = species.thermo
if isinstance(thermo, NASA):
poly_low = thermo.polynomials[0]
poly_high = thermo.polynomials[1]
# Determine the number of each type of element in the molecule
elements = ['C','H','N','O']; elementCounts = [0,0,0,0]
# Remove elements with zero count
index = 2
while index < len(elementCounts):
if elementCounts[index] == 0:
del elements[index]
del elementCounts[index]
else:
index += 1
# Line 1
string = '{0:<16} '.format(species.label)
if len(elements) <= 4:
# Use the original Chemkin syntax for the element counts
for symbol, count in zip(elements, elementCounts):
string += '{0!s:<2}{1:<3d}'.format(symbol, count)
string += ' ' * (4 - len(elements))
else:
string += ' ' * 4
string += 'G{0:<10.3f}{1:<10.3f}{2:<8.2f} 1'.format(poly_low.Tmin.value_si, poly_high.Tmax.value_si, poly_low.Tmax.value_si)
if len(elements) > 4:
string += '&\n'
# Use the new-style Chemkin syntax for the element counts
# This will only be recognized by Chemkin 4 or later
for symbol, count in zip(elements, elementCounts):
string += '{0!s:<2}{1:<3d}'.format(symbol, count)
string += '\n'
# Line 2
string += '{0:< 15.8E}{1:< 15.8E}{2:< 15.8E}{3:< 15.8E}{4:< 15.8E} 2\n'.format(poly_high.c0, poly_high.c1, poly_high.c2, poly_high.c3, poly_high.c4)
# Line 3
string += '{0:< 15.8E}{1:< 15.8E}{2:< 15.8E}{3:< 15.8E}{4:< 15.8E} 3\n'.format(poly_high.c5, poly_high.c6, poly_low.c0, poly_low.c1, poly_low.c2)
# Line 4
string += '{0:< 15.8E}{1:< 15.8E}{2:< 15.8E}{3:< 15.8E} 4\n'.format(poly_low.c3, poly_low.c4, poly_low.c5, poly_low.c6)
f.write(string)
f.close()
def plot(self, outputDirectory):
"""
        Plot the heat capacity, enthalpy, entropy, and Gibbs free energy of the
fitted thermodynamics model, along with the same values from the
statistical mechanics model that the thermodynamics model was fitted
to. The plot is saved to the file ``thermo.pdf`` in the output
directory. The plot is not generated if ``matplotlib`` is not installed.
"""
# Skip this step if matplotlib is not installed
try:
import pylab
except ImportError:
return
Tlist = numpy.arange(10.0, 2501.0, 10.0)
Cplist = numpy.zeros_like(Tlist)
Cplist1 = numpy.zeros_like(Tlist)
Hlist = numpy.zeros_like(Tlist)
Hlist1 = numpy.zeros_like(Tlist)
Slist = numpy.zeros_like(Tlist)
Slist1 = numpy.zeros_like(Tlist)
Glist = numpy.zeros_like(Tlist)
Glist1 = numpy.zeros_like(Tlist)
conformer = self.species.conformer
thermo = self.species.thermo
for i in range(Tlist.shape[0]):
Cplist[i] = conformer.getHeatCapacity(Tlist[i])
Slist[i] = conformer.getEntropy(Tlist[i])
Hlist[i] = (conformer.getEnthalpy(Tlist[i]) + conformer.E0.value_si) * 0.001
Glist[i] = Hlist[i] - Tlist[i] * Slist[i] * 0.001
Cplist1[i] = thermo.getHeatCapacity(Tlist[i])
Slist1[i] = thermo.getEntropy(Tlist[i])
Hlist1[i] = thermo.getEnthalpy(Tlist[i]) * 0.001
Glist1[i] = thermo.getFreeEnergy(Tlist[i]) * 0.001
fig = pylab.figure(figsize=(10,8))
pylab.subplot(2,2,1)
pylab.plot(Tlist, Cplist / 4.184, '-r', Tlist, Cplist1 / 4.184, '-b')
pylab.xlabel('Temperature (K)')
pylab.ylabel('Heat capacity (cal/mol*K)')
pylab.legend(['statmech', 'thermo'], loc=4)
pylab.subplot(2,2,2)
pylab.plot(Tlist, Slist / 4.184, '-r', Tlist, Slist1 / 4.184, '-b')
pylab.xlabel('Temperature (K)')
pylab.ylabel('Entropy (cal/mol*K)')
pylab.subplot(2,2,3)
pylab.plot(Tlist, Hlist / 4.184, '-r', Tlist, Hlist1 / 4.184, '-b')
pylab.xlabel('Temperature (K)')
pylab.ylabel('Enthalpy (kcal/mol)')
pylab.subplot(2,2,4)
pylab.plot(Tlist, Glist / 4.184, '-r', Tlist, Glist1 / 4.184, '-b')
pylab.xlabel('Temperature (K)')
pylab.ylabel('Gibbs free energy (kcal/mol)')
fig.subplots_adjust(left=0.10, bottom=0.08, right=0.95, top=0.95, wspace=0.35, hspace=0.20)
pylab.savefig(os.path.join(outputDirectory, 'thermo.pdf'))
pylab.close()
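################################################################################
# Minimal usage sketch (not part of the original module, shown for orientation
# only): assuming `spc` is an rmgpy Species whose conformer has already been
# populated by a statistical mechanics job, a thermo fit could look like
#
#     job = ThermoJob(species=spc, thermoClass='NASA')
#     job.execute(outputFile='output.py', plot=True)
#
# execute() calls generateThermo() to fit a Wilhoit model (converted to NASA
# polynomials when thermoClass is 'NASA'), then appends the results to the
# output file and to a Chemkin-format chem.inp via save().
################################################################################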
|
mit
|
sanguinariojoe/aquagpusph
|
examples/3D/spheric_testcase2_dambreak_mpi/cMake/plot_t.py
|
14
|
5704
|
#******************************************************************************
# *
# * ** * * * * *
# * * * * * * * * * *
# ***** * * * * ***** ** *** * * ** *** *** *
# * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * *
# * * ** * ** * * *** *** *** ** *** * * *
# * * * *
# ** * * *
# *
#******************************************************************************
# *
# This file is part of AQUAgpusph, a free CFD program based on SPH. *
# Copyright (C) 2012 Jose Luis Cercos Pita <[email protected]> *
# *
# AQUAgpusph is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# AQUAgpusph is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with AQUAgpusph. If not, see <http://www.gnu.org/licenses/>. *
# *
#******************************************************************************
import math
import os
from os import path
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def readFile(filepath):
""" Read and extract data from a file
:param filepath File ot read
"""
abspath = filepath
if not path.isabs(filepath):
abspath = path.join(path.dirname(path.abspath(__file__)), filepath)
# Read the file by lines
f = open(abspath, "r")
lines = f.readlines()
f.close()
data = []
    for l in lines[1:-1]:  # skip the first line and the last one, which may still be incomplete
l = l.strip()
while l.find(' ') != -1:
l = l.replace(' ', ' ')
fields = l.split(' ')
        try:
            data.append([float(x) for x in fields])
        except ValueError:
            continue
# Transpose the data
return [list(d) for d in zip(*data)]
fig = plt.figure()
ax = fig.add_subplot(111)
t = [0.0]
e = [0.0]
fove = ax.fill_between(t,
0,
e,
facecolor='red',
linewidth=0.0)
fave = ax.fill_between(t,
0,
e,
facecolor='blue',
linestyle="-",
linewidth=0.0)
love, = ax.plot(t,
e,
color='#990000',
linestyle="-",
linewidth=2.0,
label='Average overhead')
lave, = ax.plot(t,
e,
color='#000099',
linestyle="-",
linewidth=2.0,
label='Average elapsed')
line, = ax.plot(t,
e,
color="black",
linestyle="-",
linewidth=1.0,
alpha=0.5,
label='Elapsed')
def update(frame_index):
plt.tight_layout()
data = readFile('Performance.dat')
t = data[0]
e = data[1]
e_ela = data[2]
e_ove = data[5]
# Clear nan values
for i in range(len(e_ela)):
if math.isnan(e_ela[i]):
e_ela[i] = 0.0
if math.isnan(e_ove[i]):
e_ove[i] = 0.0
e_ave = [e_ela[i] - e_ove[i] for i in range(len(e_ela))]
    # clear the fills (iterate over a copy, since removing mutates ax.collections)
    for coll in list(ax.collections):
        coll.remove()
fove = ax.fill_between(t,
0,
e_ela,
facecolor='red',
linestyle="-",
linewidth=2.0)
fave = ax.fill_between(t,
0,
e_ave,
facecolor='blue',
linestyle="-",
linewidth=2.0)
love.set_data(t, e_ela)
lave.set_data(t, e_ave)
line.set_data(t, e)
ax.set_xlim(0, t[-1])
ax.set_ylim(0, 1.5 * e_ela[-1])
# Set some options
ax.grid()
ax.set_xlim(0, 0.1)
ax.set_ylim(-0.1, 0.1)
ax.set_autoscale_on(False)
ax.set_xlabel(r"$t \, [\mathrm{s}]$", fontsize=21)
ax.set_ylabel(r"$t_{CPU} \, [\mathrm{s}]$", fontsize=21)
ax.legend(handles=[lave, love, line], loc='upper right')
ani = animation.FuncAnimation(fig, update, interval=5000)
plt.show()
|
gpl-3.0
|
h2educ/scikit-learn
|
examples/model_selection/plot_precision_recall.py
|
249
|
6150
|
"""
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average precision-recall curve and its average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
LEX2016WoKaGru/pyClamster
|
scripts/calibration/fe3/FE3_calibration_proj_sun_img_real_coordinates_determination_pickling.py
|
1
|
2835
|
# -*- coding: utf-8 -*-
"""
Created on 14.08.16
Created for pyclamster
Copyright (C) {2016}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# System modules
import os,sys,gc
import pickle
import logging
import pytz,datetime
# External modules
import matplotlib.pyplot as plt
import numpy as np
# Internal modules
import pyclamster
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
LAT=54.4947
LON=11.2408
session = pyclamster.CameraSession(
images="/home/yann/Studium/LEX/LEX/cam/cam3/calibration/projection/FE3*.jpg",
longitude=LON,latitude=LAT
)
imgsunxs = []
imgsunys = []
realsunazis = []
realsuneles = []
for image in session: # loop over all images
# get time
imgtime = image._get_time_from_filename(fmt="FE3_Image_%Y%m%d_%H%M%S_UTCp1.jpg")
imgtime = pytz.utc.localize(imgtime)
imgtime = imgtime - datetime.timedelta(hours=1)
image.time = imgtime
# get sun position
imgsunpos = image.getImageSunPosition()
imgsunx = imgsunpos[1]
imgsuny = image.data.shape[0] - imgsunpos[0] # invert y axis
realsunazi = image.getRealSunAzimuth()
realsunele = image.getRealSunElevation()
# print
logger.debug("Path: {}".format(image.path))
logger.debug("Time: {}".format(imgtime))
logger.debug("ImageSunPos: {}".format(imgsunpos))
logger.debug("RealSunAzi: {}".format(realsunazi))
logger.debug("RealSunEle: {}".format(realsunele))
#plt.imshow(image.data)
#plt.scatter(x=imgsunpos[1],y=imgsunpos[0])
#plt.show()
#sys.stdin.read(1) # pause
# merge data
imgsunxs.append(imgsunx)
imgsunys.append(imgsuny)
realsunazis.append(realsunazi)
realsuneles.append(realsunele)
del image;gc.collect() # delete and free memory
# merge information and save to files
sun_img=pyclamster.Coordinates3d(x=imgsunxs,y=imgsunys,azimuth_offset=0,azimuth_clockwise=False)
pickle.dump(sun_img,open("data/FE3_projcalib_sun_img.pk","wb"))
sun_real=pyclamster.Coordinates3d(azimuth=realsunazis,elevation=realsuneles,azimuth_clockwise=True,azimuth_offset=3/2*np.pi)
pickle.dump(sun_real,open("data/FE3_projcalib_sun_real.pk","wb"))
|
gpl-3.0
|
yeatmanlab/pyAFQ
|
examples/plot_recobundles.py
|
2
|
6554
|
"""
=========================================
Plotting tract profiles using RecoBundles
=========================================
An example of tracking and segmenting two tracts with RecoBundles
[Garyfallidis2017]_, and plotting their tract profiles for FA (calculated with
DTI).
See `plot_tract_profile` for explanations of each stage here. The main
difference here is that segmentation uses the RecoBundles algorithm, instead of
the AFQ waypoint ROI approach.
"""
import os.path as op
import matplotlib.pyplot as plt
import numpy as np
import nibabel as nib
import dipy.data as dpd
from dipy.data import fetcher
import dipy.tracking.utils as dtu
import dipy.tracking.streamline as dts
from dipy.io.streamline import save_tractogram, load_tractogram
from dipy.stats.analysis import afq_profile, gaussian_weights
from dipy.io.stateful_tractogram import StatefulTractogram
from dipy.io.stateful_tractogram import Space
from dipy.align import affine_registration
import AFQ.data as afd
import AFQ.tractography as aft
import AFQ.registration as reg
import AFQ.models.dti as dti
import AFQ.segmentation as seg
import AFQ.api as api
# Target directory for this example's output files
working_dir = "./recobundles"
dpd.fetch_stanford_hardi()
hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
img = nib.load(hardi_fdata)
print("Calculating DTI...")
if not op.exists(op.join(working_dir, 'dti_FA.nii.gz')):
dti_params = dti.fit_dti(hardi_fdata, hardi_fbval, hardi_fbvec,
out_dir=working_dir)
else:
dti_params = {'FA': op.join(working_dir, 'dti_FA.nii.gz'),
'params': op.join(working_dir, 'dti_params.nii.gz')}
FA_img = nib.load(dti_params['FA'])
FA_data = FA_img.get_fdata()
print("Registering to template...")
MNI_T2_img = afd.read_mni_template()
if not op.exists(op.join(working_dir, 'mapping.nii.gz')):
import dipy.core.gradients as dpg
gtab = dpg.gradient_table(hardi_fbval, hardi_fbvec)
b0 = np.mean(img.get_fdata()[..., gtab.b0s_mask], -1)
# Prealign using affine registration
_, prealign = affine_registration(
b0,
MNI_T2_img.get_fdata(),
img.affine,
MNI_T2_img.affine)
# Then register using a non-linear registration using the affine for
# prealignment
warped_hardi, mapping = reg.syn_register_dwi(hardi_fdata, gtab,
prealign=prealign)
reg.write_mapping(mapping, op.join(working_dir, 'mapping.nii.gz'))
else:
mapping = reg.read_mapping(op.join(working_dir, 'mapping.nii.gz'),
img, MNI_T2_img)
bundle_names = ["CST", "UF", "CC_ForcepsMajor", "CC_ForcepsMinor", "OR", "VOF"]
bundles = api.BundleDict(bundle_names, seg_algo="reco80")
print("Tracking...")
if not op.exists(op.join(working_dir, 'dti_streamlines_reco.trk')):
seed_roi = np.zeros(img.shape[:-1])
for bundle in bundles:
if bundle != 'whole_brain':
sl_xform = dts.Streamlines(
dtu.transform_tracking_output(bundles[bundle]['sl'],
MNI_T2_img.affine))
delta = dts.values_from_volume(mapping.backward,
sl_xform, np.eye(4))
sl_xform = [sum(d, s) for d, s in zip(delta, sl_xform)]
sl_xform = dts.Streamlines(
dtu.transform_tracking_output(
sl_xform,
np.linalg.inv(MNI_T2_img.affine)))
sft = StatefulTractogram(sl_xform, img, Space.RASMM)
save_tractogram(sft, op.join(working_dir, f'{bundle}_atlas.trk'))
sl_xform = dts.Streamlines(
dtu.transform_tracking_output(
sl_xform,
np.linalg.inv(img.affine)))
for sl in sl_xform:
sl_as_idx = sl.astype(int)
seed_roi[sl_as_idx[:, 0],
sl_as_idx[:, 1],
sl_as_idx[:, 2]] = 1
nib.save(nib.Nifti1Image(seed_roi, img.affine),
op.join(working_dir, 'seed_roi.nii.gz'))
sft = aft.track(dti_params['params'], seed_mask=seed_roi,
directions='det', stop_mask=FA_data,
stop_threshold=0.1)
print(len(sft.streamlines))
save_tractogram(sft, op.join(working_dir, 'dti_streamlines_reco.trk'),
bbox_valid_check=False)
else:
sft = load_tractogram(op.join(working_dir, 'dti_streamlines_reco.trk'),
img)
print("Segmenting fiber groups...")
segmentation = seg.Segmentation(seg_algo='reco80',
rng=np.random.RandomState(2))
segmentation.segment(bundles,
sft,
fdata=hardi_fdata,
fbval=hardi_fbval,
fbvec=hardi_fbvec,
mapping=mapping,
reg_template=MNI_T2_img)
fiber_groups = segmentation.fiber_groups
for kk in fiber_groups:
print(kk, len(fiber_groups[kk]))
sft = StatefulTractogram(fiber_groups[kk].streamlines,
img,
Space.RASMM)
save_tractogram(sft, op.join(working_dir, '%s_reco.trk' % kk),
bbox_valid_check=False)
print("Extracting tract profiles...")
for bundle in bundles:
if bundle != 'whole_brain':
fig, ax = plt.subplots(1)
sft = load_tractogram(
op.join(working_dir, f'{bundle}_reco.trk'),
img,
to_space=Space.VOX,
bbox_valid_check=False)
weights = gaussian_weights(sft.streamlines)
profile = afq_profile(FA_data, sft.streamlines,
np.eye(4), weights=weights)
ax.plot(profile)
ax.set_title(bundle)
plt.show()
##########################################################################
# References:
# -------------------------
# .. [Garyfallidis2017] Garyfallidis, Eleftherios, Marc-Alexandre Côté,
# Francois Rheault, Jasmeen Sidhu, Janice Hau, Laurent
# Petit, David Fortin, Stephen Cunanne, and Maxime
# Descoteaux. 2017.“Recognition of White Matter Bundles
# Using Local and Global Streamline-Based Registration and
# Clustering.”NeuroImage 170: 283-295.
|
bsd-2-clause
|
policycompass/policycompass-services
|
apps/datasetmanager/dataset_data.py
|
2
|
11547
|
import json
import pandas as p
import numpy
import voluptuous as v
from collections import OrderedDict
from django.core.exceptions import ValidationError
from apps.referencepool.models import Individual, DataClass
from rest_framework import exceptions
from .time_resolutions import TimeResolutions
import logging
log = logging.getLogger(__name__)
__author__ = 'fki'
trl = TimeResolutions()
class TransformationException(exceptions.APIException):
status_code = 400
class DatasetData(object):
"""
Wraps the pandas DataFrame to hold the tabular data
of a dataset. Offers several transformation methods.
"""
def __init__(self, data_frame: p.DataFrame, unit: int, resolution: str):
self.unit = unit
self.resolution = resolution
self.df = data_frame
self.time_transformed = False
self.time_filtered = False
self.unit_transformed = False
def get_json(self) -> str:
"""
Get the data as JSON string
"""
result = {
'unit': self.unit,
'resolution': self.resolution,
'data_frame': self.df.to_json(date_format='iso')
}
return json.dumps(result)
@staticmethod
def from_json(data: str):
"""
Create a DatasetObject from a JSON string
"""
obj = json.loads(data)
h = p.read_json(obj['data_frame'])
h.sort_index(inplace=True)
dataset_data = DatasetData(h, obj['unit'], obj['resolution'])
return dataset_data
def transform_time(self, time_resolution: str):
"""
Transforms the time series into another valid
resolution
"""
if not trl.is_supported(time_resolution):
raise TransformationException(
"Time Resolution not supported. Options: " + str(
trl.get_supported_names()))
time_obj = trl.get(time_resolution)
orig_time_obj = trl.get(self.resolution)
if time_obj.level < orig_time_obj.level:
raise TransformationException(
"Upscaling of the time resolution is not supported.")
if time_obj != orig_time_obj:
self.df = self.df.resample(time_obj.offset, how='mean')
self.resolution = time_obj.name
self.time_transformed = True
def filter_by_time(self, start_time: str, end_time: str):
"""
Filters the data by the given time interval.
"""
real_start_time = self.get_time_start()
real_end_time = self.get_time_end()
if end_time and end_time < real_start_time:
raise TransformationException(
"The given end time is less than the start time"
)
if start_time and start_time > real_end_time:
raise TransformationException(
"The given start time is greater than the end time"
)
if start_time and end_time:
if start_time > end_time:
raise TransformationException(
"The start time cannot be greater than the end time."
)
try:
self.df = self.df.ix[start_time:end_time]
self.time_filtered = True
except p.datetools.DateParseError:
raise TransformationException(
"The time parameters are malformed. "
"Please provide a valid date string"
)
def filter_by_individuals(self, individuals: list):
"""
Filters the dataframe by a given list of individuals.
        Raises an exception when an individual is not available.
"""
available_individuals = self.get_individuals()
filter_inds = []
for individual in individuals:
i = int(individual)
if i not in available_individuals:
                raise TransformationException(
                    "The selected individuals are not valid.")
else:
filter_inds.append(i)
self.df = self.df[filter_inds]
log.debug(self.df)
def get_individuals(self) -> list:
"""
Returns all available individuals
as a list of integers
"""
result = self.df.columns.values
return [int(x) for x in result]
    def get_time_start(self) -> str:
        time_obj = trl.get(self.resolution)
        return time_obj.output_expr(self.df.index[0])
def get_time_end(self) -> str:
time_obj = trl.get(self.resolution)
if len(self.df.index) == 1:
return time_obj.output_expr(self.df.index[0])
else:
return time_obj.output_expr(self.df.index[-1])
class DatasetDataTransformer(object):
"""
Wraps the transformer conveniently
"""
@staticmethod
def from_api(data_dict: dict,
time_start: str,
time_end: str,
time_resolution: str,
class_id: int,
unit_id: int) -> DatasetData:
trf = DatasetDataFromAPITransformer(data_dict, time_resolution,
time_start, time_end, class_id,
unit_id)
return trf.get_dataset_data()
@staticmethod
def to_api(dataset_data: DatasetData) -> dict:
trf = DatasetDataToAPITransformer(dataset_data)
return trf.get_api_data()
class DatasetDataFromAPITransformer(object):
"""
Generates a DatasetData Object from the datastructure
used within the API
"""
def __init__(self, data: dict, time_resolution: str, time_start: str,
time_end: str, class_id: int, unit_id: int):
self.data = data
self.time_resolution = time_resolution
self.time_start = time_start
self.time_end = time_end
self.class_id = class_id
self.unit_id = unit_id
self._dataset_data = None
self._create_individuals()
self._pre_validate()
self.df = self._transform()
self._dataset_data = DatasetData(
self.df,
self.unit_id,
self.time_resolution
)
def get_dataset_data(self) -> DatasetData:
"""
Returns the actual DatasetData object
"""
return self._dataset_data
def _pre_validate(self):
"""
        Performs a pre-validation of the data
"""
schema = self._get_validation_schema()
if not isinstance(self.data, dict):
raise ValidationError("Data field needs to be a dictionary.")
try:
schema(self.data)
except v.Invalid as e:
raise ValidationError(e)
def _get_validation_schema(self) -> v.Schema:
"""
Defines the validation schema
"""
def validate_individual(value):
if self.class_id != 7:
if not isinstance(value, int):
raise v.Invalid(
"Individual of Element %d of data.table is not an integer."
" Please use class 'custom' to provide strings.")
schema = v.Schema({
v.Required('table', msg="Data dict needs a 'table' field."): v.All(
[
{
v.Required('row'): int,
v.Required('individual'): validate_individual,
v.Required('values'): v.All(
dict
),
}
], v.Length(min=1))
})
return schema
def _create_individuals(self):
table = self.data['table']
for row in table:
individual = row['individual']
if isinstance(individual, str):
# Does it exist already
# Todo Make this better
try:
saved_ind = Individual.objects.get(title=individual, data_class=self.class_id)
row['individual'] = saved_ind.id
except Individual.DoesNotExist:
ind = Individual(
title=individual,
data_class=DataClass.objects.get(id=7)
)
ind.save()
row['individual'] = ind.id
def _transform(self) -> p.DataFrame:
"""
Returns the transformed data as pandas DataFrame, which
is needed to init the DatasetData
"""
if not trl.is_supported(self.time_resolution):
raise ValidationError(
"The Specified time_resolution is not supported.")
time_obj = trl.get(self.time_resolution)
time_range = self._create_time_range()
f = p.DataFrame(index=time_range)
table = self.data['table']
for row in table:
values = []
for time in time_range:
try:
values.append(row['values'][time_obj.output_expr(time)])
except KeyError:
raise ValidationError(
"Please provide a value for every date within the range and resolution.")
f[row['individual']] = values
return f
def _create_time_range(self) -> p.DatetimeIndex:
"""
Creates a time index based on the metadata
"""
time_obj = trl.get(self.time_resolution)
start = time_obj.input_expr(self.time_start)
end = time_obj.input_expr(self.time_end)
return p.date_range(start, end, freq=time_obj.offset)
class DatasetDataToAPITransformer(object):
"""
Generates the API datastructure from a DatasetData object
"""
def __init__(self, dataset_data: DatasetData):
self._dataset_data = dataset_data
self._view_data = self._transform()
def get_api_data(self) -> dict:
"""
Returns the actual dict for the view
"""
result = {
'table': self._view_data,
'individuals': self._dataset_data.get_individuals()
}
if self._dataset_data.time_transformed:
result['time_transformation'] = {
'resolution': self._dataset_data.resolution,
'start': self._dataset_data.get_time_start(),
'end': self._dataset_data.get_time_end(),
'method': 'mean'
}
if self._dataset_data.time_filtered:
result['time_filter'] = {
'start': self._dataset_data.get_time_start(),
'end': self._dataset_data.get_time_end(),
}
return result
def _transform(self) -> dict:
"""
Transforms the DatasetData into a dict
"""
time_obj = trl.get(self._dataset_data.resolution)
result = []
row = 1
for index, i in self._dataset_data.df.iteritems():
new_dict = OrderedDict()
new_dict['row'] = row
new_dict['individual'] = int(index)
new_dict['values'] = OrderedDict()
for index2, j in i.iteritems():
if numpy.isnan(j):
new_dict['values'][time_obj.output_expr(index2)] = None
else:
new_dict['values'][time_obj.output_expr(index2)] = round(j, 2)
result.append(new_dict)
row += 1
return result
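# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the downscaling done in
# DatasetData.transform_time boils down to a pandas resample with a mean
# aggregation, e.g. turning daily values into monthly means:
#
#     idx = p.date_range('2015-01-01', '2015-03-31', freq='D')
#     df = p.DataFrame({1: range(len(idx))}, index=idx)
#     monthly = df.resample('M', how='mean')  # old-style API, as used above
#
# Upscaling to a finer resolution is rejected because the missing intermediate
# values cannot be reconstructed.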
|
agpl-3.0
|
dialounke/pylayers
|
pylayers/gis/readvrml.py
|
1
|
21434
|
# -*- coding: utf-8 -*-
"""
.. currentmodule:: pylayers.gis.readvrml
.. autosummary::
"""
import doctest
import os
import glob
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from pylayers.util import geomutil as geo
from pylayers.util import pyutil as pyu
from pylayers.util.project import *
from descartes.patch import PolygonPatch
import shapely.geometry as shg
import shapely.ops as sho
import networkx as nx
from pylayers.gis.layout import Layout
def savelay(dpt,dwall, _filename='struc.lay'):
""" save walls in lay format
.. TODO
to implement
The default filename is struc.str2
Parameters
----------
dpt :
dwall:
"""
fd = open('struc.str2', 'w')
s1 = str(len(dpt.keys()))+' '+str(len(dwall.keys()))+' 0\n'
fd.write(s1)
for ipt in dpt:
p = dpt[ipt]
s1 = str(p[0])+' '+str(p[1])+' 0 0 0 0\n'
fd.write(s1)
for iw in dwall:
tail = dwall[iw]['tail']+1
head = dwall[iw]['head']+1
zmin = dwall[iw]['zmin']
zmax = dwall[iw]['zmax']
core = str(2)
s1 = str(tail)+' '+str(
head)+' 1 '+core+' '+'1 0 '+str(zmin)+' '+str(zmax)+'\n'
fd.write(s1)
fd.close()
def stretch(s, alphat=0.1, alphah=0.1):
""" strech a LineString by a given perc on both terminations
Parameters
----------
s : LineString shapely
alphat : stretching coeff tail
alphah : stretching coeff head
Returns
-------
    ss : stretched segment
Examples
--------
>>> s1 = shg.LineString(((0,0),(1,0)))
>>> ss1 = stretch(s1,0.1)
>>> s2 = shg.LineString(((-0.1,0),(1.1,0)))
>>> assert (ss1.equals(s2))
"""
ls = s.length
x, y = s.xy
u = np.array((x[1]-x[0], y[1]-y[0]))
un = u/ls
pt = np.array((x[0], y[0]))
ph = np.array((x[1], y[1]))
# ppt = pt - un*ls*alpha
# pph = ph + un*ls*alpha
ppt = pt - un*alphat
pph = ph + un*alphah
ss = shg.LineString(((ppt[0], ppt[1]), (pph[0], pph[1])))
return(ss)
def segsplit(s1, s2, tol=0.0001, alpha=0.1):
""" split segment
Parameters
----------
s1 : shapely LineString
s2 : shapely LineString
tol : tolerance for point equality test
alpha : stretching factor
Returns
-------
ts : list of segment
bks1 : boolean keep s1
bks2 : boolean keep s2
Examples
--------
>>> s1 = shg.LineString(((0,0),(1,0)))
>>> s2 = shg.LineString(((1,0),(2,0)))
>>> s3 = shg.LineString(((1,-10),(1,10)))
>>> s4 = shg.LineString(((0.5,-10),(0.5,10)))
>>> ts1 = segsplit(s1,s2)
>>> ts2 = segsplit(s1,s3)
>>> ts3 = segsplit(s1,s4)
"""
ts1 = []
ts2 = []
bks1 = True
bks2 = True
beta = alpha/(2*alpha+1)
if s1.intersects(s2):
p1t, p1h = s1.boundary
p2t, p2h = s2.boundary
ls1 = s1.length
ls2 = s2.length
pi = s1.intersection(s2)
if s1.touches(s2): # touching segments
if not (pi.equals_exact(p1t, tol) or pi.equals_exact(p1h, tol)):
s11 = shg.LineString(((p1t.xy[0][0], p1t.xy[
1][0]), (pi.xy[0][0], pi.xy[1][0])))
s12 = shg.LineString(((pi.xy[0][0], pi.xy[
1][0]), (p1h.xy[0][0], p1h.xy[1][0])))
if s11.length > 0 and s11.length >= alpha:
ts1.append(s11)
if s12.length > 0 and s12.length >= alpha:
ts1.append(s12)
bks1 = False
            if not (pi.equals_exact(p2t, tol) or pi.equals_exact(p2h, tol)):
s21 = shg.LineString(((p2t.xy[0][0], p2t.xy[
1][0]), (pi.xy[0][0], pi.xy[1][0])))
s22 = shg.LineString(((pi.xy[0][0], pi.xy[
1][0]), (p2h.xy[0][0], p2h.xy[1][0])))
if s21.length > 0 and s21.length > alpha:
ts2.append(s21)
if s22.length > 0 and s22.length >= alpha:
ts2.append(s22)
bks2 = False
else: # crossing segments
s11 = shg.LineString(((p1t.xy[0][0], p1t.xy[
1][0]), (pi.xy[0][0], pi.xy[1][0])))
s12 = shg.LineString(((pi.xy[0][0], pi.xy[
1][0]), (p1h.xy[0][0], p1h.xy[1][0])))
s21 = shg.LineString(((p2t.xy[0][0], p2t.xy[
1][0]), (pi.xy[0][0], pi.xy[1][0])))
s22 = shg.LineString(((pi.xy[0][0], pi.xy[
1][0]), (p2h.xy[0][0], p2h.xy[1][0])))
ls11 = s11.length
ls12 = s12.length
ls21 = s21.length
ls22 = s22.length
if ls11 > ls12:
ts1.append(s11)
else:
ts1.append(s12)
if ls21 > ls22:
ts2.append(s21)
else:
ts2.append(s22)
# if s11.length>0 and s11.length>=alpha:
# ts1.append(s11)
# if s12.length>0 and s12.length>=alpha:
# ts1.append(s12)
# if s21.length>0 and s21.length>=alpha:
# ts2.append(s21)
# if s22.length>0 and s21.length>=alpha:
# ts2.append(s22)
bks1 = False
bks2 = False
return(ts1, ts2, bks1, bks2)
def extract(vrmlstrg, dico):
""" converts recursively a vrml string into a dictionnary
Parameters
----------
vrmlstrg:
dico :
Returns
------
    dico : dictionary associated with vrml string strg
"""
val = vrmlstrg
while len(val) != 0:
key, val = inbracket(val)
if val != '':
dico[key] = val
dico = extract(val, dico)
return(dico)
def inbracket(strg):
""" extraction of bracket content
Parameters
----------
strg : a string with a bracket
Returns
-------
lbra : left part of the string
inbr : string inside the bracket
Examples
--------
>>> strg ='abcd{un texte}'
>>> lbra,inbr = inbracket(strg)
>>> assert(lbra=='abcd')
>>> assert(inbr=='un texte')
"""
strg = strg.replace('\r', '')
ssp = strg.split('{')
lbra = ssp[0]
rbra = ''
inbr = ''
for k in ssp[1:]:
rbra = rbra+k+'{'
rsp = rbra.split('}')
for k in rsp[:-1]:
inbr = inbr+k+'}'
inbr = inbr.rstrip('}')
return(lbra, inbr)
def incrochet(strg):
""" get content inside crochet
Parameters
----------
strg : string
Returns
-------
lbra : left part of the string
inbr : string inside the bracket
Examples
--------
>>> strg ='abcd[un texte]'
>>> lbra,inbr = incrochet(strg)
>>> assert(lbra=='abcd')
>>> assert(inbr=='un texte')
"""
strg = strg.replace('\r', '')
ssp = strg.split('[')
lbra = ssp[0]
rbra = ''
inbr = ''
for k in ssp[1:]:
rbra = rbra+k+'['
rsp = rbra.split(']')
for k in rsp[:-1]:
inbr = inbr+k+']'
inbr = inbr.rstrip(']')
return(lbra, inbr)
def geomLine(st):
""" build a Line from string
Parameters
----------
st : string
Returns
-------
tabindex : array of indexes
tcoord : array of coordinates
"""
st1 = st.split('coordIndex')
index = st1[1]
index = index.replace('[', '')
index = index.replace(']', '')
index = index.replace(' ', '')
tindex = index.split(',')
tabindex = []
for st in tindex[:-1]:
tabindex.append(int(st))
    tabindex = np.array(tabindex).reshape(len(tabindex)//3, 3)
a, b = inbracket(st1[0])
c = incrochet(b)[1]
c = c.split(',')
coord = []
for st in c[:-1]:
pt = st.split(' ')
for ic in pt:
coord.append(float(ic))
    tcoord = np.array(coord).reshape(len(coord)//3, 3)
return(tabindex, tcoord)
def geomFace(st):
""" build a Face from string
Parameters
----------
st : string
Returns
-------
tabindex
tcoord : ndarray
"""
st1 = st.split('coordIndex')
index = st1[1]
index = index.replace('[', '')
index = index.replace(']', '')
index = index.replace(' ', '')
tindex = index.split(',')
tabindex = []
for st in tindex[:-1]:
tabindex.append(int(st))
tabindex = np.array(tabindex)
a, b = inbracket(st1[0])
c = incrochet(b)[1]
c = c.split(',')
coord = []
for st in c[:-1]:
pt = st.split(' ')
for ic in pt:
coord.append(float(ic))
    tcoord = np.array(coord).reshape(len(coord)//3, 3)
return(tabindex, tcoord)
def ParseDirectionalLight(st):
"""
"""
d = {}
s1 = st.split('intensity')
t = s1[0]
st1 = t.split(' ')
d['on'] = bool(st1[1])
t = s1[1]
s2 = t.split('ambientIntensity')
d['intensity'] = float(s2[0])
t = s2[1]
s2 = t.split('color')
d['ambientIntensity'] = float(s2[0])
t = s2[1]
s2 = t.split('direction')
d['color'] = s2[0]
d['direction'] = s2[1]
return(d)
def ParseMaterial(st):
"""
"""
st = st.replace('material', '')
st = st.replace('Material', '')
st = st.replace('{', '')
dst = st.split(',')
d = {}
for s in dst:
# print s
u = s.split(' ')
# print u
sp1 = st.split('diffuseColor')
t = sp1[1]
ts = t.split(',')
print(ts)
d['diffuseColor'] = ts[0]
try:
d['specularColor'] = ts[1].split('specularColor')[1]
d['shininess'] = ts[2].split('shininess')[1]
d['transparency'] = ts[3].split('transparency')[1]
except:
d['specularColor'] = ts[2].split('specularColor')[1]
d['shininess'] = ts[3].split('shininess')[1]
d['transparency'] = ts[4].split('transparency')[1]
return(d)
def show(dico):
""" show dico
"""
for key in dico.keys():
if key != 'name':
plt.plot(dico[key]['coord'][:, 0], dico[key]['coord'][:, 2])
else:
            print(dico[key])
plt.show()
def parsevrml(filename):
""" parse a vrml file
Parameters
----------
filename : vrml filename
Returns
-------
    dg : dictionaries of groups
"""
fd = open(filename, 'r')
lignes = fd.readlines()
fd.close()
tobj = {}
tnum = {}
k = -1
for li in lignes:
li = li.replace('\n', '')
li = li.replace('\r,', '') # line finishing with a comma
        if li.find('DEF') != -1:  # if the line contains the DEF keyword, start a new dictionary entry
k = k+1
            tmp = li.split(' ')  # split on spaces; index 1 is the dictionary entry name
obid = tmp[1]
tobj[k] = li
tnum[k] = obid # name of the entry
else:
try:
                tobj[k] = tobj[k] + li  # append the line to the current dictionary entry k
except:
pass
td = {}
dg = {} # list of groups
targetkey = ' geometry IndexedLineSet '
targetkey = ' geometry IndexedFaceSet '
for on in tnum.keys():
dico = {}
ob = tnum[on]
val = tobj[on]
dico = extract(val, dico)
if ob.find('LIGHT') != -1:
td[ob] = ParseDirectionalLight(dico.values()[0])
if ob.find('APPEARANCE') != -1:
td[ob] = ParseMaterial(dico.values()[0])
if val.find('Group') != -1:
try:
# tg.append(g) # save previous group
dg[name] = g
except:
pass
g = {}
name = val.split('Group')[0].split(' ')[1] # name of the group
if ob.find('ID_') != -1:
st = dico[targetkey]
i, c = geomFace(st)
g[tnum[on]] = {'index': i, 'coord': c}
dg[name] = g
return(dg)
def vrml2sha(tg):
""" convert vrml object into shapely polygons
Parameters
----------
tg : list of objects
"""
for l in tg:
for k in l.keys():
if k != 'name':
c = l[k]['coord']
i = l[k]['index']
tt = []
ltt = []
for u in i:
if u == -1: # -1 closure indicator
ltt.append(tt)
tt = []
else:
tt.append(u)
P = geo.Polygon(c)
def vrml2geom(tg, rac):
""" convert vrml object into geomview files
Parameters
----------
tg : list of objects
rac : filename prefix
"""
for l in tg:
# prepare geomview file
fi1 = rac+l['name']
_fi1 = fi1+'.list'
fina = pyu.getlong(_fi1, 'geom')
fd = open(fina, 'w')
fd.write('LIST')
for k in l.keys():
if k != 'name':
filename = fi1+'-'+str(k)
fd.write('{<'+filename+'.off}\n')
G = geo.Geomoff(filename)
c = l[k]['coord']
i = l[k]['index']
tt = []
ltt = []
for u in i:
if u == -1:
ltt.append(tt)
tt = []
else:
tt.append(u)
# build a geomview list of polygons
G.polygons(c, ltt)
fd.close()
class VLayout(PyLayers):
def load(self, filename):
"""
Parameters
----------
filename : str
"""
dg = parsevrml(filename)
self.entity = {}
for t in dg: # WALL, COLUMN, DOOR , STAIR , SPACE
self.entity[t] = {}
k = 0
for ID in dg[t]:
c = dg[t][ID]['coord']
self.entity[t][k] = {}
self.entity[t][k]['ID'] = ID
self.entity[t][k]['coord'] = c
l = dg[t][ID]['index']
dp = {}
p = []
kk = 0
for il in l:
if il == -1:
dp[kk] = p
p = []
kk = kk + 1
else:
p.append(il)
self.entity[t][k]['index'] = dp
k = k + 1
#
# Simplify Coord (Projection in 0xy plane)
#
for g in self.entity:
for ID in self.entity[g]:
x = self.entity[g][ID]['coord'][:, 0]
y = -self.entity[g][ID]['coord'][:, 2]
z = self.entity[g][ID]['coord'][:, 1]
tp = np.vstack((x, y))
Np = np.shape(tp)[1]
tp2 = {}
already = False
iop = 0
for ip in range(Np):
p = tp[:, ip]
for iold in tp2.keys():
pold = tp2[iold]['coord']
if np.shape(pold) == (2,):
dist = np.dot(p-pold, p-pold)
if dist < 1e-15:
already = True
tp2[iold]['nump'].append(ip)
if not already:
tp2[iop] = {}
tp2[iop]['coord'] = p
tp2[iop]['nump'] = [ip]
iop = iop + 1
already = False
self.entity[g][ID]['c2d'] = tp2
#
# create a transcode between 3d index point and 2d index point
#
for g in self.entity:
for ID in self.entity[g]:
tr = {}
dr2 = self.entity[g][ID]['c2d']
for k in dr2.keys():
for u in dr2[k]['nump']:
tr[u] = k
self.entity[g][ID]['tr'] = tr
#
# Create a new index for 2d points
#
for g in self.entity:
for ID in self.entity[g]:
di = self.entity[g][ID]['index']
di2 = {}
for k in di.keys(): # for all polygons
                    ti2 = []  # reserve a list for 2d indexes
lpoly = di[k] # get sequence of 3d points
for ip in lpoly:
ti2.append(self.entity[g][ID]['tr'][ip])
if len(np.unique(ti2)) == len(ti2):
di2[k] = ti2
self.entity[g][ID]['index2'] = di2
#
# Create Polygon2D
#
for g in self.entity:
for ID in self.entity[g]:
dp = {}
for ip in self.entity[g][ID]['index2']:
lp = self.entity[g][ID]['index2'][ip]
tp = []
                    for iq in lp:
                        tp.append(self.entity[g][ID]['c2d'][iq]['coord'])
poly = geo.Polygon(tp)
dp[ip] = poly
self.entity[g][ID]['poly'] = dp
#
#
#
def show(self, num=100,fig=[],ax=[]):
""" show entities
Parameters
----------
num : int
"""
if fig==[]:
f = plt.figure()
else:
f = fig
if ax ==[]:
a = f.add_subplot(111)
else:
a = ax
if num > len(self.entity.keys()):
group = self.entity.keys()
else:
group = [self.entity.keys()[num]]
for g in group:
for ID in self.entity[g]:
for p in self.entity[g][ID]['poly']:
colrand = hex(int((2**23)*sp.rand(1))+2**20).replace('0x', '#')
f,a = self.entity[g][ID]['poly'][p].plot(color=colrand,alpha=0.3,fig=f,ax=a)
plt.axis('scaled')
return f,a
def wallanalysis(self):
""" walls analysis
get height
"""
w = self.entity['WALL']
dwall = {}
for k in w.keys():
dwall[k] = {}
dwall[k]['ID'] = w[k]['ID']
#
# height retrieval
#
z = np.sort(np.unique(w[k]['coord'][:, 1]))
dwall[k]['zmin'] = min(z)
dwall[k]['zmax'] = max(z)
if len(z) == 2:
dwall[k]['door'] = False
else:
dwall[k]['door'] = True
dwall[k]['zdoor'] = z[1]
#
# Length,width retrieval
#
dp = w[k]['poly']
gxmin = 1e15
gymin = 1e15
gxmax = -1e15
gymax = -1e15
for ik in dp.keys():
pol = dp[ik]
bounds = pol.bounds
xmin = bounds[0]
ymin = bounds[1]
xmax = bounds[2]
ymax = bounds[3]
gxmin = min(gxmin, xmin)
gymin = min(gymin, ymin)
gxmax = max(gxmax, xmax)
gymax = max(gymax, ymax)
dx = gxmax-gxmin
dy = gymax-gymin
length = max(dx, dy)
width = min(dx, dy)
xmoy = (gxmin+gxmax)/2.
ymoy = (gymin+gymax)/2.
if (dx == width):
seg = shg.LineString(((xmoy, gymin), (xmoy, gymax)))
elif (dy == width):
seg = shg.LineString(((gxmin, ymoy), (gxmax, ymoy)))
# seg = stretch(seg,0.1)
dwall[k]['bounds'] = bounds
dwall[k]['thickness'] = width
dwall[k]['length'] = length
dwall[k]['seg'] = seg
return(dwall)
def show3entity(self, group, IDs):
""" geomview vizualisation of entity
Parameters
----------
group :
IDs :
"""
te = self.entity[group]
fi1 = 'entity'
GL = geo.Geomlist(fi1)
for k in IDs:
ID = te.keys()[k]
filename = fi1+'-'+str(k)
GL.append('{<'+filename+'.off}\n')
G = geo.Geomoff(filename)
c = te[ID]['coord']
i = te[ID]['index']
tt = []
ltt = []
            print(i)
for u in i:
ltt.append(i[u])
# build a geomview list of polygons
            print(ltt)
G.polygons(c, ltt)
GL.show3()
if __name__ == "__main__":
doctest.testmod()
#_filename = 'B11C-E1.wrl'
_filename = 'B2A-R0.wrl'
#_filename = 'B11C-E1.wrl'
filename = pyu.getlong(_filename,'struc/wrl')
VL = VLayout()
VL.load(filename)
dwall = VL.wallanalysis()
for iw in dwall:
seg = dwall[iw]['seg']
thick = dwall[iw]['thickness']
bdoor = dwall[iw]['door']
x,y = seg.xy
if bdoor:
plt.plot(x,y,color='r',linewidth=thick*10,alpha=1)
else:
plt.plot(x,y,color='k',linewidth=thick*10,alpha=1)
plt.axis('scaled')
for k in dwall:
seg = dwall[k]['seg'].xy
pta = np.r_[seg[0][0],seg[1][0]]
phe = np.r_[seg[0][1],seg[1][1]]
        print(pta, phe)
|
mit
|
zfrenchee/pandas
|
pandas/tests/io/json/test_json_table_schema.py
|
5
|
18731
|
"""Tests for Table Schema integration."""
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.core.dtypes.dtypes import (
PeriodDtype, CategoricalDtype, DatetimeTZDtype)
from pandas.io.json.table_schema import (
as_json_table_type,
build_table_schema,
make_field,
set_default_names)
class TestBuildSchema(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
},
index=pd.Index(range(4), name='idx'))
def test_build_table_schema(self):
result = build_table_schema(self.df, version=False)
expected = {
'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['idx']
}
assert result == expected
result = build_table_schema(self.df)
assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name='foo')
result = build_table_schema(s, version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'foo', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
result = build_table_schema(s)
assert 'pandas_version' in result
def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
def test_multiindex(self):
df = self.df.copy()
idx = pd.MultiIndex.from_product([('a', 'b'), (1, 2)])
df.index = idx
result = build_table_schema(df, version=False)
expected = {
'fields': [{'name': 'level_0', 'type': 'string'},
{'name': 'level_1', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['level_0', 'level_1']
}
assert result == expected
df.index.names = ['idx0', None]
expected['fields'][0]['name'] = 'idx0'
expected['primaryKey'] = ['idx0', 'level_1']
result = build_table_schema(df, version=False)
assert result == expected
class TestTableSchemaType(object):
def test_as_json_table_type_int_data(self):
int_data = [1, 2, 3]
int_types = [np.int, np.int16, np.int32, np.int64]
for t in int_types:
assert as_json_table_type(np.array(
int_data, dtype=t)) == 'integer'
def test_as_json_table_type_float_data(self):
float_data = [1., 2., 3.]
float_types = [np.float, np.float16, np.float32, np.float64]
for t in float_types:
assert as_json_table_type(np.array(
float_data, dtype=t)) == 'number'
def test_as_json_table_type_bool_data(self):
bool_data = [True, False]
bool_types = [bool, np.bool]
for t in bool_types:
assert as_json_table_type(np.array(
bool_data, dtype=t)) == 'boolean'
def test_as_json_table_type_date_data(self):
date_data = [pd.to_datetime(['2016']),
pd.to_datetime(['2016'], utc=True),
pd.Series(pd.to_datetime(['2016'])),
pd.Series(pd.to_datetime(['2016'], utc=True)),
pd.period_range('2016', freq='A', periods=3)]
for arr in date_data:
assert as_json_table_type(arr) == 'datetime'
def test_as_json_table_type_string_data(self):
strings = [pd.Series(['a', 'b']), pd.Index(['a', 'b'])]
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_data(self):
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
assert as_json_table_type(pd.Series(pd.Categorical([1]))) == 'any'
assert as_json_table_type(pd.CategoricalIndex([1])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
# ------
# dtypes
# ------
def test_as_json_table_type_int_dtypes(self):
integers = [np.int, np.int16, np.int32, np.int64]
for t in integers:
assert as_json_table_type(t) == 'integer'
def test_as_json_table_type_float_dtypes(self):
floats = [np.float, np.float16, np.float32, np.float64]
for t in floats:
assert as_json_table_type(t) == 'number'
def test_as_json_table_type_bool_dtypes(self):
bools = [bool, np.bool]
for t in bools:
assert as_json_table_type(t) == 'boolean'
def test_as_json_table_type_date_dtypes(self):
        # TODO: datetime.date? datetime.time?
dates = [np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(),
DatetimeTZDtype('ns', 'US/Central')]
for t in dates:
assert as_json_table_type(t) == 'datetime'
def test_as_json_table_type_timedelta_dtypes(self):
durations = [np.timedelta64, np.dtype("<m8[ns]")]
for t in durations:
assert as_json_table_type(t) == 'duration'
def test_as_json_table_type_string_dtypes(self):
strings = [object] # TODO
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_dtypes(self):
# TODO: I think before is_categorical_dtype(Categorical)
# returned True, but now it's False. Figure out why or
# if it matters
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(CategoricalDtype()) == 'any'
class TestTableOrient(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
'E': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'])),
'F': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'],
ordered=True)),
'G': [1., 2., 3, 4.],
'H': pd.date_range('2016-01-01', freq='d', periods=4,
tz='US/Central'),
},
index=pd.Index(range(4), name='idx'))
def test_build_series(self):
s = pd.Series([1, 2], name='a')
s.index.name = 'id'
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [{'name': 'id', 'type': 'integer'},
{'name': 'a', 'type': 'integer'}]
schema = {
'fields': fields,
'primaryKey': ['id'],
}
expected = OrderedDict([
('schema', schema),
('data', [OrderedDict([('id', 0), ('a', 1)]),
OrderedDict([('id', 1), ('a', 2)])])])
assert result == expected
def test_to_json(self):
df = self.df.copy()
df.index.name = 'idx'
result = df.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [
{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'E',
'ordered': False,
'type': 'any'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'F',
'ordered': True,
'type': 'any'},
{'name': 'G', 'type': 'number'},
{'name': 'H', 'type': 'datetime', 'tz': 'US/Central'}
]
schema = {
'fields': fields,
'primaryKey': ['idx'],
}
data = [
OrderedDict([('idx', 0), ('A', 1), ('B', 'a'),
('C', '2016-01-01T00:00:00.000Z'),
('D', 'P0DT1H0M0S'),
('E', 'a'), ('F', 'a'), ('G', 1.),
('H', '2016-01-01T06:00:00.000Z')
]),
OrderedDict([('idx', 1), ('A', 2), ('B', 'b'),
('C', '2016-01-02T00:00:00.000Z'),
('D', 'P0DT1H1M0S'),
('E', 'b'), ('F', 'b'), ('G', 2.),
('H', '2016-01-02T06:00:00.000Z')
]),
OrderedDict([('idx', 2), ('A', 3), ('B', 'c'),
('C', '2016-01-03T00:00:00.000Z'),
('D', 'P0DT1H2M0S'),
('E', 'c'), ('F', 'c'), ('G', 3.),
('H', '2016-01-03T06:00:00.000Z')
]),
OrderedDict([('idx', 3), ('A', 4), ('B', 'c'),
('C', '2016-01-04T00:00:00.000Z'),
('D', 'P0DT1H3M0S'),
('E', 'c'), ('F', 'c'), ('G', 4.),
('H', '2016-01-04T06:00:00.000Z')
]),
]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_float_index(self):
data = pd.Series(1, index=[1., 2.])
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema', {
'fields': [{'name': 'index', 'type': 'number'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']
}),
('data', [OrderedDict([('index', 1.0), ('values', 1)]),
OrderedDict([('index', 2.0), ('values', 1)])])])
)
assert result == expected
def test_to_json_period_index(self):
idx = pd.period_range('2016', freq='Q-JAN', periods=2)
data = pd.Series(1, idx)
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
fields = [{'freq': 'Q-JAN', 'name': 'index', 'type': 'datetime'},
{'name': 'values', 'type': 'integer'}]
schema = {'fields': fields, 'primaryKey': ['index']}
data = [OrderedDict([('index', '2015-11-01T00:00:00.000Z'),
('values', 1)]),
OrderedDict([('index', '2016-02-01T00:00:00.000Z'),
('values', 1)])]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_categorical_index(self):
data = pd.Series(1, pd.CategoricalIndex(['a', 'b']))
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema',
{'fields': [{'name': 'index', 'type': 'any',
'constraints': {'enum': ['a', 'b']},
'ordered': False},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}),
('data', [
OrderedDict([('index', 'a'),
('values', 1)]),
OrderedDict([('index', 'b'), ('values', 1)])])])
)
assert result == expected
def test_date_format_raises(self):
with pytest.raises(ValueError):
self.df.to_json(orient='table', date_format='epoch')
# others work
self.df.to_json(orient='table', date_format='iso')
self.df.to_json(orient='table')
def test_make_field_int(self):
data = [1, 2, 3]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "name", "type": 'integer'}
assert result == expected
def test_make_field_float(self):
data = [1., 2., 3.]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "name", "type": 'number'}
assert result == expected
def test_make_field_datetime(self):
data = [1., 2., 3.]
kinds = [pd.Series(pd.to_datetime(data), name='values'),
pd.to_datetime(data)]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime'}
assert result == expected
kinds = [pd.Series(pd.to_datetime(data, utc=True), name='values'),
pd.to_datetime(data, utc=True)]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime', "tz": "UTC"}
assert result == expected
arr = pd.period_range('2016', freq='A-DEC', periods=4)
result = make_field(arr)
expected = {"name": "values", "type": 'datetime', "freq": "A-DEC"}
assert result == expected
def test_make_field_categorical(self):
data = ['a', 'b', 'c']
ordereds = [True, False]
for ordered in ordereds:
arr = pd.Series(pd.Categorical(data, ordered=ordered), name='cats')
result = make_field(arr)
expected = {"name": "cats", "type": "any",
"constraints": {"enum": data},
"ordered": ordered}
assert result == expected
arr = pd.CategoricalIndex(data, ordered=ordered, name='cats')
result = make_field(arr)
expected = {"name": "cats", "type": "any",
"constraints": {"enum": data},
"ordered": ordered}
assert result == expected
def test_categorical(self):
s = pd.Series(pd.Categorical(['a', 'b', 'a']))
s.index.name = 'idx'
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
fields = [{'name': 'idx', 'type': 'integer'},
{'constraints': {'enum': ['a', 'b']},
'name': 'values',
'ordered': False,
'type': 'any'}]
expected = OrderedDict([
('schema', {'fields': fields,
'primaryKey': ['idx']}),
('data', [OrderedDict([('idx', 0), ('values', 'a')]),
OrderedDict([('idx', 1), ('values', 'b')]),
OrderedDict([('idx', 2), ('values', 'a')])])])
assert result == expected
def test_set_default_names_unset(self):
data = pd.Series(1, pd.Index([1]))
result = set_default_names(data)
assert result.index.name == 'index'
def test_set_default_names_set(self):
data = pd.Series(1, pd.Index([1], name='myname'))
result = set_default_names(data)
assert result.index.name == 'myname'
def test_set_default_names_mi_unset(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')]))
result = set_default_names(data)
assert result.index.names == ['level_0', 'level_1']
def test_set_default_names_mi_set(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')],
names=['n1', 'n2']))
result = set_default_names(data)
assert result.index.names == ['n1', 'n2']
def test_set_default_names_mi_partion(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')],
names=['n1', None]))
result = set_default_names(data)
assert result.index.names == ['n1', 'level_1']
def test_timestamp_in_columns(self):
df = pd.DataFrame([[1, 2]], columns=[pd.Timestamp('2016'),
pd.Timedelta(10, unit='s')])
result = df.to_json(orient="table")
js = json.loads(result)
assert js['schema']['fields'][1]['name'] == 1451606400000
assert js['schema']['fields'][2]['name'] == 10000
def test_overlapping_names(self):
cases = [
pd.Series([1], index=pd.Index([1], name='a'), name='a'),
pd.DataFrame({"A": [1]}, index=pd.Index([1], name="A")),
pd.DataFrame({"A": [1]}, index=pd.MultiIndex.from_arrays([
['a'], [1]
], names=["A", "a"])),
]
for data in cases:
with pytest.raises(ValueError) as excinfo:
data.to_json(orient='table')
assert 'Overlapping' in str(excinfo.value)
def test_mi_falsey_name(self):
# GH 16203
df = pd.DataFrame(np.random.randn(4, 4),
index=pd.MultiIndex.from_product([('A', 'B'),
('a', 'b')]))
result = [x['name'] for x in build_table_schema(df)['fields']]
assert result == ['level_0', 'level_1', 0, 1, 2, 3]
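# Hedged illustration, not part of the pandas test suite: a minimal example of
# the behaviour exercised above. The helper name `_demo_table_schema` is an
# assumption, not a pandas API.
def _demo_table_schema():
    df = pd.DataFrame({'A': [1, 2], 'B': ['x', 'y']},
                      index=pd.Index([0, 1], name='idx'))
    # Column dtypes map onto Table Schema types as tested above.
    schema = build_table_schema(df, version=False)
    assert schema == {'fields': [{'name': 'idx', 'type': 'integer'},
                                 {'name': 'A', 'type': 'integer'},
                                 {'name': 'B', 'type': 'string'}],
                      'primaryKey': ['idx']}
    # The same schema is embedded in the JSON produced by orient='table'.
    payload = json.loads(df.to_json(orient='table'))
    assert payload['schema']['primaryKey'] == ['idx']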
|
bsd-3-clause
|
mortada/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/__init__.py
|
20
|
12315
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used, so the sections below
show which feature columns are fed to each estimator.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier expose a very
similar interface. You can easily plug an optimizer and/or regularization
into any of them.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When the number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train:
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators: one for training and one
for evaluation.
You can specify a different input function for each, so training and
evaluation can use different datasets.
```python
# Input builders
def input_fn_train: # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
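As an illustration only (a sketch assuming TF 1.x-era contrib APIs, not the
canonical implementation; `my_learning_rate` is an assumed params key), a
linear regression `model_fn` could look like:
```python
def my_model_fn(features, targets, mode, params):
  # 1. Configure the model: a single linear layer with one output.
  predictions = tf.contrib.layers.linear(features, num_outputs=1)
  # 2. Define the loss used for training and evaluation.
  loss = tf.reduce_mean(tf.square(predictions - targets))
  # 3. Define the training operation; the global step lets the
  #    Estimator track training progress.
  train_op = tf.contrib.layers.optimize_loss(
      loss=loss,
      global_step=tf.contrib.framework.get_global_step(),
      learning_rate=params["my_learning_rate"],
      optimizer="SGD")
  # 4. Return the predictions, loss and train op as described above.
  return predictions, loss, train_op
```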
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There are additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
|
apache-2.0
|
jlegendary/SimpleCV
|
SimpleCV/examples/machine-learning/machine-learning_nuts-vs-bolts.py
|
12
|
2782
|
'''
This example uses scikit-learn to do a binary classification of images
of nuts vs. bolts. Only the area, height, and width of each blob are used
as features to classify the images; the features are extracted from the
images via blob detection.
This is a very crude example and could easily be built upon, but it is just
meant to give an introductory example for using machine learning.
The data set should auto-download; if not, you can get it from:
https://github.com/downloads/sightmachine/SimpleCV/nuts_bolts.zip
'''
print __doc__
from SimpleCV import *
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
import numpy as np
#Download the dataset
machine_learning_data_set = 'https://github.com/downloads/sightmachine/SimpleCV/nuts_bolts.zip'
data_path = download_and_extract(machine_learning_data_set)
print 'Test Images Downloaded at:', data_path
display = Display((800,600)) #Display to show the images
target_names = ['bolt', 'nut']
print 'Loading Bolts for Training'
bolts = ImageSet(data_path + '/data/supervised/bolts') #Load Bolts for training
bolt_blobs = [b.findBlobs()[0] for b in bolts] #extract the blobs for our features
tmp_data = [] #array to store data features
tmp_target = [] #array to store targets
for b in bolt_blobs: #Format Data for SVM
tmp_data.append([b.area(), b.height(), b.width()])
tmp_target.append(0)
print 'Loading Nuts for Training'
nuts = ImageSet(data_path + '/data/supervised/nuts')
nut_blobs = [n.invert().findBlobs()[0] for n in nuts]
for n in nut_blobs:
tmp_data.append([n.area(), n.height(), n.width()])
tmp_target.append(1)
dataset = np.array(tmp_data)
targets = np.array(tmp_target)
print 'Training Machine Learning'
clf = LinearSVC()
clf = clf.fit(dataset, targets)
clf2 = LogisticRegression().fit(dataset, targets)
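#Hedged sanity check, not part of the original example: report training
#accuracy for both classifiers (LinearSVC and LogisticRegression both expose
#a mean-accuracy score() method in scikit-learn).
print 'SVM training accuracy:', clf.score(dataset, targets)
print 'Logistic regression training accuracy:', clf2.score(dataset, targets)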
print 'Running prediction on bolts now'
untrained_bolts = ImageSet(data_path + '/data/unsupervised/bolts')
unbolt_blobs = [b.findBlobs()[0] for b in untrained_bolts]
for b in unbolt_blobs:
ary = [b.area(), b.height(), b.width()]
name = target_names[clf.predict(ary)[0]]
probability = clf2.predict_proba(ary)[0]
img = b.image
img.drawText(name)
img.save(display)
print "Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1]
print 'Running prediction on nuts now'
untrained_nuts = ImageSet(data_path + '/data/unsupervised/nuts')
unnut_blobs = [n.invert().findBlobs()[0] for n in untrained_nuts]
for n in unnut_blobs:
ary = [n.area(), n.height(), n.width()]
name = target_names[clf.predict(ary)[0]]
probability = clf2.predict_proba(ary)[0]
img = n.image
img.drawText(name)
img.save(display)
print "Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1]
|
bsd-3-clause
|
jstoxrocky/statsmodels
|
statsmodels/sandbox/tsa/garch.py
|
25
|
52178
|
'''general non-linear MLE for time series analysis
idea for general version
------------------------
subclass defines geterrors(parameters) besides loglike,...
and covariance matrix of parameter estimates (e.g. from hessian
or outerproduct of jacobian)
update: I don't really need geterrors directly, but rather get_h, the
conditional variance process
new version Garch0 looks ok, time to clean up and test
no constraints yet
in some cases: "Warning: Maximum number of function evaluations has been exceeded."
Notes
-----
idea: cache intermediate design matrix for geterrors so it doesn't need
to be built at each function call
superclass or result class calculates result statistic based
on errors, loglike, jacobian and cov/hessian
-> aic, bic, ...
-> test statistics, tvalue, fvalue, ...
-> new to add: distribution (mean, cov) of non-linear transformation
-> parameter restrictions or transformation with corrected covparams (?)
-> sse, rss, rsquared ??? are they defined from this in general
-> robust parameter cov ???
-> additional residual based tests, NW, ... likelihood ratio, lagrange
multiplier tests ???
how much can be reused from linear model result classes where
`errorsest = y - X*beta` ?
for tsa: what's the division of labor between model, result instance
and process
examples:
* arma: ls and mle look good
* arimax: add exog, especially mean, trend, prefilter, e.g. (1-L)
* arma_t: arma with t distributed errors (just a change in loglike)
* garch: need loglike and (recursive) errorest
* regime switching model without unobserved state, e.g. threshold
roadmap for garch:
* simple case
* starting values: garch11 explicit formulas
* arma-garch, assumed separable, blockdiagonal Hessian
* empirical example: DJI, S&P500, MSFT, ???
* other standard garch: egarch, pgarch,
* non-normal distributions
* other methods: forecast, news impact curves (impulse response)
* analytical gradient, Hessian for basic garch
* cleaner simulation of garch
* result statistics, AIC, ...
* parameter constraints
* try penalization for higher lags
* other garch: regime-switching
for pgarch (power garch) need transformation of etax given
the parameters, but then misofilter should work
general class aparch (see garch glossary)
References
----------
see notes_references.txt
Created on Feb 6, 2010
@author: "josef pktd"
'''
from __future__ import print_function
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import optimize, signal
import matplotlib.pyplot as plt
import numdifftools as ndt
from statsmodels.base.model import Model, LikelihoodModelResults
from statsmodels.sandbox import tsa
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x**2, axis=0)
def normloglike(x, mu=0, sigma2=1, returnlls=False, axis=0):
x = np.asarray(x)
x = np.atleast_1d(x)
if axis is None:
x = x.ravel()
#T,K = x.shape
if x.ndim > 1:
nobs = x.shape[axis]
else:
nobs = len(x)
x = x - mu # assume can be broadcasted
if returnlls:
#Compute the individual log likelihoods if needed
lls = -0.5*(np.log(2*np.pi) + np.log(sigma2) + x**2/sigma2)
        # Use these to compute the LL
LL = np.sum(lls,axis)
return LL, lls
else:
#Compute the log likelihood
#print(np.sum(np.log(sigma2),axis))
LL = -0.5 * (np.sum(np.log(sigma2),axis) + np.sum((x**2)/sigma2, axis) + nobs*np.log(2*np.pi))
return LL
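# Hedged sanity check, not in the original module: with a per-observation
# variance array, normloglike should agree with summing scipy.stats.norm.logpdf
# over the sample. The helper name `_check_normloglike` is an assumption.
def _check_normloglike():
    from scipy import stats
    x = np.array([0.5, -1.0, 2.0])
    sigma2 = np.array([1.5, 2.0, 0.8])
    ll = normloglike(x, mu=0.1, sigma2=sigma2)
    ll_ref = stats.norm.logpdf(x, loc=0.1, scale=np.sqrt(sigma2)).sum()
    assert_almost_equal(ll, ll_ref)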
# copied from model.py
class LikelihoodModel(Model):
"""
Likelihood model is a subclass of Model.
"""
def __init__(self, endog, exog=None):
super(LikelihoodModel, self).__init__(endog, exog)
self.initialize()
def initialize(self):
"""
Initialize (possibly re-initialize) a Model instance. For
instance, the design matrix of a linear model may change
and some things must be recomputed.
"""
pass
#TODO: if the intent is to re-initialize the model with new data then
# this method needs to take inputs...
def loglike(self, params):
"""
Log-likelihood of model.
"""
raise NotImplementedError
def score(self, params):
"""
Score vector of model.
The gradient of logL with respect to each parameter.
"""
raise NotImplementedError
def information(self, params):
"""
Fisher information matrix of model
Returns -Hessian of loglike evaluated at params.
"""
raise NotImplementedError
def hessian(self, params):
"""
The Hessian matrix of the model
"""
raise NotImplementedError
def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):
"""
Fit method for likelihood based models
Parameters
----------
start_params : array-like, optional
            An optional initial guess for the parameters. Defaults to a
            vector of zeros.
method : str
            Method can be 'newton', 'bfgs', 'powell', 'cg', 'ncg' or 'fmin'.
            The default is 'newton'. See scipy.optimize for more information.
"""
methods = ['newton', 'bfgs', 'powell', 'cg', 'ncg', 'fmin']
if start_params is None:
start_params = [0]*self.exog.shape[1] # will fail for shape (K,)
if not method in methods:
raise ValueError("Unknown fit method %s" % method)
f = lambda params: -self.loglike(params)
score = lambda params: -self.score(params)
# hess = lambda params: -self.hessian(params)
hess = None
#TODO: can we have a unified framework so that we can just do func = method
# and write one call for each solver?
if method.lower() == 'newton':
iteration = 0
start = np.array(start_params)
history = [np.inf, start]
while (iteration < maxiter and np.all(np.abs(history[-1] - \
history[-2])>tol)):
H = self.hessian(history[-1])
newparams = history[-1] - np.dot(np.linalg.inv(H),
self.score(history[-1]))
history.append(newparams)
iteration += 1
mlefit = LikelihoodModelResults(self, newparams)
mlefit.iteration = iteration
elif method == 'bfgs':
score=None
xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
optimize.fmin_bfgs(f, start_params, score, full_output=1,
maxiter=maxiter, gtol=tol)
converge = not warnflag
mlefit = LikelihoodModelResults(self, xopt)
optres = 'xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag'
self.optimresults = dict(zip(optres.split(', '),[
xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag]))
elif method == 'ncg':
xopt, fopt, fcalls, gcalls, hcalls, warnflag = \
optimize.fmin_ncg(f, start_params, score, fhess=hess,
full_output=1, maxiter=maxiter, avextol=tol)
mlefit = LikelihoodModelResults(self, xopt)
converge = not warnflag
elif method == 'fmin':
#fmin(func, x0, args=(), xtol=0.0001, ftol=0.0001, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None)
xopt, fopt, niter, funcalls, warnflag = \
optimize.fmin(f, start_params,
full_output=1, maxiter=maxiter, xtol=tol)
mlefit = LikelihoodModelResults(self, xopt)
converge = not warnflag
self._results = mlefit
return mlefit
#TODO: I take it this is only a stub and should be included in another
# model class?
class TSMLEModel(LikelihoodModel):
"""
univariate time series model for estimation with maximum likelihood
Note: This is not working yet
"""
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(TSMLEModel, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def geterrors(self, params):
raise NotImplementedError
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
"""
raise NotImplementedError
def score(self, params):
"""
Score vector for Arma model
"""
#return None
#print(params
jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
return jac(params)[-1]
def hessian(self, params):
"""
Hessian of arma model. Currently uses numdifftools
"""
#return None
Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
return Hfun(params)[-1]
def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):
'''estimate model by minimizing negative loglikelihood
does this need to be overwritten ?
'''
if start_params is None and hasattr(self, '_start_params'):
start_params = self._start_params
#start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))
mlefit = super(TSMLEModel, self).fit(start_params=start_params,
maxiter=maxiter, method=method, tol=tol)
return mlefit
class Garch0(TSMLEModel):
'''Garch model,
still experimentation stage:
simplified structure, plain garch, no constraints
still looking for the design of the base class
serious bug:
ar estimate looks ok, ma estimate awful
-> check parameterization of lagpolys and constant
looks ok after adding missing constant
but still difference to garch11 function
corrected initial condition
-> only small differences left between the 3 versions
ar estimate is close to true/DGP model
note constant has different parameterization
but design looks better
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Garch0, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
# put this in fit (?) or in initialize instead
self._etax = endog**2
self._icetax = np.atleast_1d(self._etax.mean())
def initialize(self):
pass
def geth(self, params):
'''
Parameters
----------
params : tuple, (ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma, mu = params
#etax = self.endog #this would be enough for basic garch version
etax = self._etax + mu
icetax = self._icetax #read ic-eta-x, initial condition
        #TODO: where does mu go with lfilter ?????????????
# shouldn't matter except for interpretation
nobs = etax.shape[0]
#check arguments of lfilter
zi = signal.lfiltic(ma,ar, icetax)
#h = signal.lfilter(ar, ma, etax, zi=zi) #np.atleast_1d(etax[:,1].mean()))
#just guessing: b/c ValueError: BUG: filter coefficient a[0] == 0 not supported yet
h = signal.lfilter(ma, ar, etax, zi=zi)[0]
return h
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
make more generic with using function _convertparams
which could also include parameter transformation
_convertparams_in, _convertparams_out
allow for different distributions t, ged,...
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
# check where constant goes
#ma = np.zeros((q+1,3))
#ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma = np.concatenate(([0], params[p:p+q]))
mu = params[-1]
params = (ar, ma, mu) #(ar, ma)
h = self.geth(params)
#temporary safe for debugging:
self.params_converted = params
self.h = h #for testing
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(h)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #no choice of axis
# same as with y = self.endog, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
class GarchX(TSMLEModel):
'''Garch model,
still experimentation stage:
another version, this time with exog and miso_filter
still looking for the design of the base class
not done yet, just a design idea
* use misofilter as in garch (gjr)
* but take etax = exog
    this can include a constant, an asymmetric effect (gjr) and
    other explanatory variables (e.g. high-low spread)
todo: renames
eta -> varprocess
etax -> varprocessx
icetax -> varprocessic (is actually ic of eta/sigma^2)
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
        super(GarchX, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
# put this in fit (?) or in initialize instead
#nobs defined in super - verify
#self.nobs = nobs = endog.shape[0]
#add nexog to super
#self.nexog = nexog = exog.shape[1]
        nobs = endog.shape[0]
        self._etax = np.column_stack((np.ones((nobs, 1)), endog**2, exog))
self._icetax = np.atleast_1d(self._etax.mean())
def initialize(self):
pass
def convert_mod2params(ar, ma, mu):
pass
def geth(self, params):
'''
Parameters
----------
params : tuple, (ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma, mu = params
#etax = self.endog #this would be enough for basic garch version
etax = self._etax + mu
icetax = self._icetax #read ic-eta-x, initial condition
        #TODO: where does mu go with lfilter ?????????????
# shouldn't matter except for interpretation
nobs = self.nobs
## #check arguments of lfilter
## zi = signal.lfiltic(ma,ar, icetax)
## #h = signal.lfilter(ar, ma, etax, zi=zi) #np.atleast_1d(etax[:,1].mean()))
## #just guessing: b/c ValueError: BUG: filter coefficient a[0] == 0 not supported yet
## h = signal.lfilter(ma, ar, etax, zi=zi)[0]
##
h = miso_lfilter(ar, ma, etax, useic=self._icetax)[0]
#print('h.shape', h.shape
hneg = h<0
if hneg.any():
#h[hneg] = 1e-6
h = np.abs(h)
#todo: raise warning, maybe not during optimization calls
return h
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
make more generic with using function _convertparams
which could also include parameter transformation
_convertparams_in, _convertparams_out
allow for different distributions t, ged,...
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
# check where constant goes
#ma = np.zeros((q+1,3))
#ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma = np.concatenate(([0], params[p:p+q]))
mu = params[-1]
params = (ar, ma, mu) #(ar, ma)
h = self.geth(params)
#temporary safe for debugging:
self.params_converted = params
self.h = h #for testing
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(h)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #no choice of axis
# same as with y = self.endog, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
class Garch(TSMLEModel):
'''Garch model gjrgarch (t-garch)
still experimentation stage, try with
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Garch, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def initialize(self):
pass
def geterrors(self, params):
'''
Parameters
----------
params : tuple, (mu, ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma = params
eta = self.endog
nobs = eta.shape[0]
etax = np.empty((nobs,3))
etax[:,0] = 1
etax[:,1:] = (eta**2)[:,None]
etax[eta>0,2] = 0
#print('etax.shape', etax.shape
h = miso_lfilter(ar, ma, etax, useic=np.atleast_1d(etax[:,1].mean()))[0]
#print('h.shape', h.shape
hneg = h<0
if hneg.any():
#h[hneg] = 1e-6
h = np.abs(h)
#print('Warning negative variance found'
#check timing, starting time for h and eta, do they match
#err = np.sqrt(h[:len(eta)])*eta #np.random.standard_t(8, size=len(h))
# let it break if there is a len/shape mismatch
err = np.sqrt(h)*eta
return err, h, etax
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
#ar = np.concatenate(([1], -np.abs(params[:p]))) #???
#better safe than fast and sorry
#
ma = np.zeros((q+1,3))
ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma[:,1] = np.concatenate(([0], params[p:p+q]))
#delta lag coefficients for negative ma innovation
ma[:,2] = np.concatenate(([0], params[p+q:p+2*q]))
mu = params[-1]
params = (ar, ma) #(mu, ar, ma)
errorsest, h, etax = self.geterrors(params)
#temporary safe for debugging
self.params_converted = params
self.errorsest, self.h, self.etax = errorsest, h, etax
#h = h[:-1] #correct this in geterrors
#print('shapes errorsest, h, etax', errorsest.shape, h.shape, etax.shape
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(errorsest)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #not used
# muy = errorsest.mean()
# # llike is verified, see below
# # same as with y = errorsest, ht = sigma2
# # np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
# llike = -0.5 * (np.sum(np.log(sigma2),axis)
# + np.sum(((errorsest)**2)/sigma2, axis)
# + nobs*np.log(2*np.pi))
# return llike
muy = errorsest.mean()
# llike is verified, see below
# same as with y = errorsest, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
def gjrconvertparams(self, params, nar, nma):
"""
flat to matrix
Notes
-----
needs to be overwritten by subclass
"""
p, q = nar, nma
ar = np.concatenate(([1], params[:p]))
#ar = np.concatenate(([1], -np.abs(params[:p]))) #???
#better safe than fast and sorry
#
ma = np.zeros((q+1,3))
ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma[:,1] = np.concatenate(([0], params[p:p+q]))
#delta lag coefficients for negative ma innovation
ma[:,2] = np.concatenate(([0], params[p+q:p+2*q]))
mu = params[-1]
params2 = (ar, ma) #(mu, ar, ma)
        return params2
#TODO: this should be generalized to ARMA?
#can possibly also leverage TSME above
# also note that this is NOT yet general
# it was written for my homework, assumes constant is zero
# and that process is AR(1)
# examples at the end of run as main below
class AR(LikelihoodModel):
"""
Notes
-----
This is not general, only written for the AR(1) case.
Fit methods that use super and broyden do not yet work.
"""
def __init__(self, endog, exog=None, nlags=1):
if exog is None: # extend to handle ADL(p,q) model? or subclass?
exog = endog[:-nlags]
endog = endog[nlags:]
super(AR, self).__init__(endog, exog)
self.nobs += nlags # add lags back to nobs for real T
#TODO: need to fix underscore in Model class.
#Done?
def initialize(self):
pass
def loglike(self, params):
"""
The unconditional loglikelihood of an AR(p) process
Notes
-----
Contains constant term.
"""
nobs = self.nobs
y = self.endog
ylag = self.exog
penalty = self.penalty
if isinstance(params,tuple):
# broyden (all optimize.nonlin return a tuple until rewrite commit)
params = np.asarray(params)
usepenalty=False
if not np.all(np.abs(params)<1) and penalty:
oldparams = params
params = np.array([.9999]) # make it the edge
usepenalty=True
diffsumsq = sumofsq(y-np.dot(ylag,params))
# concentrating the likelihood means that sigma2 is given by
sigma2 = 1/nobs*(diffsumsq-ylag[0]**2*(1-params**2))
loglike = -nobs/2 * np.log(2*np.pi) - nobs/2*np.log(sigma2) + \
.5 * np.log(1-params**2) - .5*diffsumsq/sigma2 -\
ylag[0]**2 * (1-params**2)/(2*sigma2)
if usepenalty:
# subtract a quadratic penalty since we min the negative of loglike
loglike -= 1000 *(oldparams-.9999)**2
return loglike
def score(self, params):
"""
Notes
-----
Need to generalize for AR(p) and for a constant.
Not correct yet. Returns numerical gradient. Depends on package
numdifftools.
"""
y = self.endog
ylag = self.exog
nobs = self.nobs
diffsumsq = sumofsq(y-np.dot(ylag,params))
dsdr = 1/nobs * -2 *np.sum(ylag*(y-np.dot(ylag,params))[:,None])+\
2*params*ylag[0]**2
sigma2 = 1/nobs*(diffsumsq-ylag[0]**2*(1-params**2))
gradient = -nobs/(2*sigma2)*dsdr + params/(1-params**2) + \
1/sigma2*np.sum(ylag*(y-np.dot(ylag, params))[:,None])+\
.5*sigma2**-2*diffsumsq*dsdr+\
ylag[0]**2*params/sigma2 +\
ylag[0]**2*(1-params**2)/(2*sigma2**2)*dsdr
if self.penalty:
pass
        j = ndt.Jacobian(self.loglike)
return j(params)
# return gradient
def information(self, params):
"""
Not Implemented Yet
"""
return
def hessian(self, params):
"""
Returns numerical hessian for now. Depends on numdifftools.
"""
        h = ndt.Hessian(self.loglike)
return h(params)
def fit(self, start_params=None, method='bfgs', maxiter=35, tol=1e-08,
penalty=False):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
start_params : array-like, optional
A first guess on the parameters. Defaults is a vector of zeros.
method : str, optional
Unconstrained solvers:
Default is 'bfgs', 'newton' (newton-raphson), 'ncg'
(Note that previous 3 are not recommended at the moment.)
and 'powell'
Constrained solvers:
'bfgs-b', 'tnc'
See notes.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
        tol : float
The convergence tolerance. Default is 1e-08.
penalty : bool
Whether or not to use a penalty function. Default is False,
though this is ignored at the moment and the penalty is always
used if appropriate. See notes.
Notes
-----
The unconstrained solvers use a quadratic penalty (regardless if
penalty kwd is True or False) in order to ensure that the solution
stays within (-1,1). The constrained solvers default to using a bound
of (-.999,.999).
"""
self.penalty = penalty
method = method.lower()
#TODO: allow user-specified penalty function
# if penalty and method not in ['bfgs_b','tnc','cobyla','slsqp']:
# minfunc = lambda params : -self.loglike(params) - \
# self.penfunc(params)
# else:
minfunc = lambda params: -self.loglike(params)
if method in ['newton', 'bfgs', 'ncg']:
super(AR, self).fit(start_params=start_params, method=method,
maxiter=maxiter, tol=tol)
else:
bounds = [(-.999,.999)] # assume stationarity
            if start_params is None:
start_params = np.array([0]) #TODO: assumes AR(1)
if method == 'bfgs-b':
retval = optimize.fmin_l_bfgs_b(minfunc, start_params,
approx_grad=True, bounds=bounds)
self.params, self.llf = retval[0:2]
if method == 'tnc':
retval = optimize.fmin_tnc(minfunc, start_params,
approx_grad=True, bounds = bounds)
self.params = retval[0]
if method == 'powell':
retval = optimize.fmin_powell(minfunc,start_params)
self.params = retval[None]
#TODO: write regression tests for Pauli's branch so that
# new line_search and optimize.nonlin can get put in.
#http://projects.scipy.org/scipy/ticket/791
# if method == 'broyden':
# retval = optimize.broyden2(minfunc, [.5], verbose=True)
# self.results = retval
class Arma(LikelihoodModel):
"""
univariate Autoregressive Moving Average model
    Note: This is not working yet, or does it?
this can subclass TSMLEModel
"""
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Arma, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def initialize(self):
pass
def geterrors(self, params):
#copied from sandbox.tsa.arima.ARIMA
p, q = self.nar, self.nma
rhoy = np.concatenate(([1], params[:p]))
rhoe = np.concatenate(([1], params[p:p+q]))
errorsest = signal.lfilter(rhoy, rhoe, self.endog)
return errorsest
def loglike(self, params):
"""
Loglikelihood for arma model
Notes
-----
The ancillary parameter is assumed to be the last element of
the params vector
"""
# #copied from sandbox.tsa.arima.ARIMA
# p = self.nar
# rhoy = np.concatenate(([1], params[:p]))
# rhoe = np.concatenate(([1], params[p:-1]))
# errorsest = signal.lfilter(rhoy, rhoe, self.endog)
errorsest = self.geterrors(params)
sigma2 = np.maximum(params[-1]**2, 1e-6)
axis = 0
nobs = len(errorsest)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
# llike = -0.5 * (np.sum(np.log(sigma2),axis)
# + np.sum((errorsest**2)/sigma2, axis)
# + nobs*np.log(2*np.pi))
llike = -0.5 * (nobs*np.log(sigma2)
+ np.sum((errorsest**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
def score(self, params):
"""
Score vector for Arma model
"""
#return None
#print(params
jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
return jac(params)[-1]
def hessian(self, params):
"""
Hessian of arma model. Currently uses numdifftools
"""
#return None
Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
return Hfun(params)[-1]
def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):
if start_params is None:
start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))
mlefit = super(Arma, self).fit(start_params=start_params,
maxiter=maxiter, method=method, tol=tol)
return mlefit
def generate_kindofgarch(nobs, ar, ma, mu=1.):
    '''simulate a garch-like process, but without squared errors in the arma part
    used for an initial trial; produces a nice graph
'''
#garm1, gmam1 = [0.4], [0.2]
#pqmax = 1
# res = np.zeros(nobs+pqmax)
# rvs = np.random.randn(nobs+pqmax,2)
# for t in range(pqmax,nobs+pqmax):
# res[i] =
#ar = [1.0, -0.99]
#ma = [1.0, 0.5]
#this has the wrong distribution, should be eps**2
#TODO: use new version tsa.arima.??? instead, has distr option
#arest = tsa.arima.ARIMA()
#arest = tsa.arima.ARIMA #try class method, ARIMA needs data in constructor
from statsmodels.tsa.arima_process import arma_generate_sample
h = arma_generate_sample(ar,ma,nobs,0.1)
#h = np.abs(h)
h = (mu+h)**2
h = np.exp(h)
err = np.sqrt(h)*np.random.randn(nobs)
return err, h
def generate_garch(nobs, ar, ma, mu=1., scale=0.1):
'''simulate standard garch
scale : float
scale/standard deviation of innovation process in GARCH process
'''
eta = scale*np.random.randn(nobs)
# copied from armageneratesample
h = signal.lfilter(ma, ar, eta**2)
#
#h = (mu+h)**2
#h = np.abs(h)
#h = np.exp(h)
#err = np.sqrt(h)*np.random.randn(nobs)
err = np.sqrt(h)*eta #np.random.standard_t(8, size=nobs)
return err, h
def generate_gjrgarch(nobs, ar, ma, mu=1., scale=0.1, varinnovation=None):
'''simulate gjr garch process
Parameters
----------
ar : array_like, 1d
autoregressive term for variance
ma : array_like, 2d
moving average term for variance, with coefficients for negative
shocks in second column
mu : float
constant in variance law of motion
scale : float
scale/standard deviation of innovation process in GARCH process
Returns
-------
err : array 1d, (nobs+?,)
simulated gjr-garch process,
h : array 1d, (nobs+?,)
simulated variance
etax : array 1d, (nobs+?,)
data matrix for constant and ma terms in variance equation
Notes
-----
References
----------
'''
if varinnovation is None: # rename ?
eta = scale*np.random.randn(nobs)
else:
eta = varinnovation
# copied from armageneratesample
etax = np.empty((nobs,3))
etax[:,0] = mu
etax[:,1:] = (eta**2)[:,None]
etax[eta>0,2] = 0
h = miso_lfilter(ar, ma, etax)[0]
#
#h = (mu+h)**2
#h = np.abs(h)
#h = np.exp(h)
#err = np.sqrt(h)*np.random.randn(nobs)
#print('h.shape', h.shape)
err = np.sqrt(h[:len(eta)])*eta #np.random.standard_t(8, size=len(h))
return err, h, etax
def loglike_GARCH11(params, y):
# Computes the likelihood vector of a GARCH11
# assumes y is centered
w = params[0] # constant (1);
alpha = params[1] # coefficient of lagged squared error
beta = params[2] # coefficient of lagged variance
y2 = y**2;
nobs = y2.shape[0]
ht = np.zeros(nobs);
ht[0] = y2.mean() #sum(y2)/T;
for i in range(1,nobs):
ht[i] = w + alpha*y2[i-1] + beta * ht[i-1]
sqrtht = np.sqrt(ht)
x = y/sqrtht
llvalues = -0.5*np.log(2*np.pi) - np.log(sqrtht) - 0.5*(x**2);
return llvalues.sum(), llvalues, ht
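# Hedged usage sketch, not part of the original module: estimate GARCH(1,1)
# parameters by maximizing loglike_GARCH11 with a generic optimizer. The
# wrapper name `fit_garch11` and the starting values are assumptions; no
# positivity/stationarity constraints are imposed, matching the "no
# constraints yet" note in the module docstring.
def fit_garch11(y, start_params=(0.1, 0.1, 0.8)):
    '''crude GARCH(1,1) fit for a return series y (centered internally)'''
    y = np.asarray(y)
    y = y - y.mean()
    negloglike = lambda params: -loglike_GARCH11(params, y)[0]
    # optimize is imported from scipy at the top of the module
    return optimize.fmin(negloglike, np.asarray(start_params, dtype=float),
                         disp=0)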
from statsmodels.tsa.filters.filtertools import miso_lfilter
#copied to statsmodels.tsa.filters.filtertools
def miso_lfilter_old(ar, ma, x, useic=False): #[0.1,0.1]):
'''
    use nd convolution to merge the inputs,
    then use lfilter to produce the output
    arguments are for column variables; the return is currently 1d
Parameters
----------
ar : array_like, 1d, float
autoregressive lag polynomial including lag zero, ar(L)y_t
ma : array_like, same ndim as x, currently 2d
moving average lag polynomial ma(L)x_t
x : array_like, 2d
input data series, time in rows, variables in columns
Returns
-------
y : array, 1d
filtered output series
inp : array, 1d
combined input series
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
    miso_lfilter finds an array y such that::
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)
'''
ma = np.asarray(ma)
ar = np.asarray(ar)
#inp = signal.convolve(x, ma, mode='valid')
#inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
#Note: convolve mixes up the variable left-right flip
#I only want the flip in time direction
#this might also be a mistake or problem in other code where I
#switched from correlate to convolve
# correct convolve version, for use with fftconvolve in other cases
inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
assert_almost_equal(inp2, inp)
nobs = x.shape[0]
    # cut off extra values at the end
#todo initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
#zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
#return signal.lfilter([1], ar, inp), inp
def test_misofilter():
x = np.arange(20).reshape(10,2)
y, inp = miso_lfilter([1., -1],[[1,1],[0,0]], x)
assert_almost_equal(y[:-1], x.sum(1).cumsum(), decimal=15)
inp2 = signal.convolve(np.arange(20),np.ones(2))[1::2]
assert_almost_equal(inp[:-1], inp2, decimal=15)
inp2 = signal.convolve(np.arange(20),np.ones(4))[1::2]
y, inp = miso_lfilter([1., -1],[[1,1],[1,1]], x)
assert_almost_equal(y, inp2.cumsum(), decimal=15)
assert_almost_equal(inp, inp2, decimal=15)
y, inp = miso_lfilter([1., 0],[[1,1],[1,1]], x)
assert_almost_equal(y, inp2, decimal=15)
assert_almost_equal(inp, inp2, decimal=15)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
y3 = (x3*np.array([-2,3,1])).sum(1)
assert_almost_equal(y[:-1], y3, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,1]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,1.0,0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,0]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[1.0,0.0,0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y, inp = miso_lfilter([1., -1],np.array([[-2.0,3,1],[1.0,0.0,0]]),x3)
assert_almost_equal(y[:-1], y4.cumsum(), decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,2]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,1.0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y, inp = miso_lfilter([1., -1],np.array([[-2.0,3,1],[0.0,0.0,1.0]]),x3)
assert_almost_equal(y[:-1], y4.cumsum(), decimal=15)
y, inp = miso_lfilter([1., 0],[[1,0],[1,0],[1,0]], x)
yt = np.convolve(x[:,0], [1,1,1])
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
y, inp = miso_lfilter([1., 0],[[0,1],[0,1],[0,1]], x)
yt = np.convolve(x[:,1], [1,1,1])
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
y, inp = miso_lfilter([1., 0],[[0,1],[0,1],[1,1]], x)
yt = np.convolve(x[:,1], [1,1,1])
yt[2:] += x[:,0]
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
def test_gjrgarch():
# test impulse response of gjr simulator
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht = np.array([ 1., 0.1, 0.05, 0.01, 0., 0. ])
assert_almost_equal(hgjr5[:6], ht, decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
assert_almost_equal(hgjr5[:6], ht.cumsum(), decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht1 = [0]
for h in ht: ht1.append(h-ht1[-1])
assert_almost_equal(hgjr5[:6], ht1[1:], decimal=15)
# negative shock
varinno = np.zeros(100)
varinno[0] = -1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht = np.array([ 1. , 0.9 , 0.75, 0.61, 0. , 0. ])
assert_almost_equal(hgjr5[:6], ht, decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
assert_almost_equal(hgjr5[:6], ht.cumsum(), decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht1 = [0]
for h in ht: ht1.append(h-ht1[-1])
assert_almost_equal(hgjr5[:6], ht1[1:], decimal=15)
'''
>>> print(signal.correlate(x3, np.array([[-2.0,3,1],[0.0,0.0,0]])[::-1,:],mode='full')[:-1, (x3.shape[1]+1)//2])
[ -1. 7. 15. 23. 31. 39. 47. 55. 63. 71.]
>>> (x3*np.array([-2,3,1])).sum(1)
array([ -1., 7., 15., 23., 31., 39., 47., 55., 63., 71.])
'''
def garchplot(err, h, title='Garch simulation'):
plt.figure()
plt.subplot(311)
plt.plot(err)
plt.title(title)
plt.ylabel('y')
plt.subplot(312)
plt.plot(err**2)
plt.ylabel('$y^2$')
plt.subplot(313)
plt.plot(h)
plt.ylabel('conditional variance')
if __name__ == '__main__':
#test_misofilter()
#test_gjrgarch()
examples = ['garch']
if 'arma' in examples:
arest = tsa.arima.ARIMA()
print("\nExample 1")
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arest.generate_sample(ar,ma,1000,0.1)
y1 -= y1.mean() #no mean correction/constant in estimation so far
arma1 = Arma(y1)
arma1.nar = 1
arma1.nma = 1
arma1res = arma1.fit(method='fmin')
print(arma1res.params)
#Warning need new instance otherwise results carry over
arma2 = Arma(y1)
res2 = arma2.fit(method='bfgs')
print(res2.params)
print(res2.model.hessian(res2.params))
print(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))
resls = arest.fit(y1,1,1)
print(resls[0])
print(resls[1])
print('\nparameter estimate')
print('parameter of DGP ar(1), ma(1), sigma_error')
print([-0.8, 0.5, 0.1])
print('mle with fmin')
print(arma1res.params)
print('mle with bfgs')
print(res2.params)
print('cond. least squares uses optim.leastsq ?')
errls = arest.error_estimate
print(resls[0], np.sqrt(np.dot(errls,errls)/errls.shape[0]))
err = arma1.geterrors(res2.params)
print('cond least squares parameter cov')
#print(np.dot(err,err)/err.shape[0] * resls[1])
#errls = arest.error_estimate
print(np.dot(errls,errls)/errls.shape[0] * resls[1])
# print('fmin hessian')
# print(arma1res.model.optimresults['Hopt'][:2,:2])
print('bfgs hessian')
print(res2.model.optimresults['Hopt'][:2,:2])
print('numdifftools inverse hessian')
print(-np.linalg.inv(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))[:2,:2])
arma3 = Arma(y1**2)
res3 = arma3.fit(method='bfgs')
print(res3.params)
nobs = 1000
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print('seed', seed)
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print('seed', seed)
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.4,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h)
#plt.show()
print('Garch11')
print(optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2]))
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print('ggres0.params', ggres0.params)
if 'rpy' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print('R acf', tsa.acf(np.power(x,2))[:15])
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print(arma3res.params)
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print(arma3bres.params)
llf = loglike_GARCH11([0.93, 0.9, 0.2], errgjr4)
print(llf[0])
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
''' this looks relatively good
>>> Arma.initialize = lambda x: x
>>> arma3 = Arma(errgjr4**2)
>>> arma3res = arma3.fit()
Warning: Maximum number of function evaluations has been exceeded.
>>> arma3res.params
array([-0.775, -0.583, -0.001])
>>> arma2.nar
1
>>> arma2.nma
1
unit root ?
>>> arma3 = Arma(hgjr4)
>>> arma3res = arma3.fit()
Optimization terminated successfully.
Current function value: -3641.529780
Iterations: 250
Function evaluations: 458
>>> arma3res.params
array([ -1.000e+00, -3.096e-04, 6.343e-03])
or maybe not great
>>> arma3res = arma3.fit(start_params=[-0.8,0.1,0.5],maxiter=5000)
Warning: Maximum number of function evaluations has been exceeded.
>>> arma3res.params
array([-0.086, 0.186, -0.001])
>>> arma3res = arma3.fit(start_params=[-0.8,0.1,0.5],maxiter=5000,method='bfgs')
Divide-by-zero encountered: rhok assumed large
Optimization terminated successfully.
Current function value: -5988.332952
Iterations: 16
Function evaluations: 245
Gradient evaluations: 49
>>> arma3res.params
array([ -9.995e-01, -9.715e-01, 6.501e-04])
'''
'''
current problems
persistence in errgjr looks too low, small tsa.acf(errgjr4**2)[:15]
as a consequence the ML estimate has also very little persistence,
estimated ar term is much too small
-> need to compare with R or matlab
help.search("garch") : ccgarch, garchSim(fGarch), garch(tseries)
HestonNandiGarchFit(fOptions)
> library('fGarch')
> spec = garchSpec()
> x = garchSim(model = spec@model, n = 500)
> acf(x**2) # has low correlation
but fit has high parameters:
> fit = garchFit(~garch(1, 1), data = x)
with rpy:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4)
Final Estimate:
LLH: -3198.2 norm LLH: -3.1982
mu omega alpha1 beta1
1.870485e-04 9.437557e-05 3.457349e-02 1.000000e-08
second run with ar = [1.0, -0.8] ma = [[1., 0, 0],[0, 1.0,0.0]]
Final Estimate:
LLH: -3979.555 norm LLH: -3.979555
mu omega alpha1 beta1
1.465050e-05 1.641482e-05 1.092600e-01 9.654438e-02
mine:
>>> ggres.params
array([ -2.000e-06, 3.283e-03, 3.769e-01, -1.000e-06])
another run, same ar, ma
Final Estimate:
LLH: -3956.197 norm LLH: -3.956197
mu omega alpha1 beta1
7.487278e-05 1.171238e-06 1.511080e-03 9.440843e-01
every step needs to be compared and tested
something looks wrong with the likelihood function, either a silly
mistake or still some conceptual problems
* found the silly mistake, I was normalizing the errors before
plugging them into the expression for the likelihood function
* now gjr garch estimation works and produces results that are very
close to the explicit garch11 estimation
initial conditions for miso_filter need to be cleaned up
lots of clean up to do after the bug hunting
'''
y = np.random.randn(20)
params = [0.93, 0.9, 0.2]
lls, llt, ht = loglike_GARCH11(params, y)
sigma2 = ht
axis=0
nobs = len(ht)
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum((y**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
print(lls, llike)
#print(np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum())
'''
>>> optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4)[0], [0.93, 0.9, 0.2])
Optimization terminated successfully.
Current function value: 7312.393886
Iterations: 95
Function evaluations: 175
array([ 3.691, 0.072, 0.932])
>>> ar
[1.0, -0.93000000000000005]
>>> ma
[[1.0, 0, 0], [0, 0.90000000000000002, 0.0]]
'''
np.random.seed(1)
tseries = np.zeros(200) # set first observation
    for i in range(1,200): # simulate the remaining 199 observations of the AR(1) process
error = np.random.randn()
tseries[i] = .9 * tseries[i-1] + .01 * error
tseries = tseries[100:]
armodel = AR(tseries)
#armodel.fit(method='bfgs-b')
#armodel.fit(method='tnc')
#powell should be the most robust, see Hamilton 5.7
armodel.fit(method='powell', penalty=True)
# The below don't work yet
#armodel.fit(method='newton', penalty=True)
#armodel.fit(method='broyden', penalty=True)
print("Unconditional MLE for AR(1) y_t = .9*y_t-1 +.01 * err")
print(armodel.params)
|
bsd-3-clause
|
jlegendary/scikit-learn
|
sklearn/datasets/tests/test_base.py
|
205
|
5878
|
import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
|
bsd-3-clause
|
charanpald/sandbox
|
sandbox/ranking/TreeRankR.py
|
1
|
6834
|
import gc
import numpy
import logging
import rpy2.robjects as robjects
import sklearn.cross_val as cross_val
from sandbox.util.Util import Util
from sandbox.util.Parameter import Parameter
from sandbox.util.Evaluator import Evaluator
from exp.metabolomics.AbstractTreeRankR import AbstractTreeRankR
class TreeRankR(AbstractTreeRankR):
"""
A wrapper for the TreeRank code written in R.
"""
def __init__(self):
super(TreeRankR, self).__init__()
def learnModelDataFrame(self, formula, XY):
"""
Learn a tree using a DataFrame XY and formula.
"""
if not self.printDebug:
self.baseLib.sink("/dev/null")
self.tree = self.treeRankLib.TreeRank(formula, XY, bestresponse=self.bestResponse, LeafRank=self.leafRank, nfcv=self.nfcv, varsplit=self.varSplit, growing=self.growing)
if not self.printDebug:
self.baseLib.sink()
def evaluateCvOuter(self, X, Y, folds, leafRank, innerFolds=3):
"""
Run model selection and output some ROC curves. In this case Y is a 1D array.
"""
Parameter.checkClass(X, numpy.ndarray)
Parameter.checkClass(Y, numpy.ndarray)
Parameter.checkInt(folds, 2, float('inf'))
if Y.ndim != 1:
raise ValueError("Expecting Y to be 1D")
indexList = cross_val.StratifiedKFold(Y, folds)
maxDepths = numpy.flipud(numpy.arange(1, 12, 1))
if leafRank == self.getTreeRankLib().LRforest:
varSplits = numpy.arange(0.6, 1.01, 0.2)
else:
varSplits = numpy.array([1])
#According to Nicolas nfcv>1 doesn't help
nfcvs = [1]
#This is tied in with depth
mincrit = 0.00
#If minsplit is too low sometimes get a node with no positive labels
minSplits = numpy.array([50])
self.setLeafRank(leafRank)
bestParams = []
bestTrainAUCs = numpy.zeros(folds)
bestTrainROCs = []
bestTestAUCs = numpy.zeros(folds)
bestTestROCs = []
bestMetaDicts = []
i = 0
for trainInds, testInds in indexList:
trainX, trainY = X[trainInds, :], Y[trainInds]
testX, testY = X[testInds, :], Y[testInds]
meanParamAUCs = []
paramList = []
logging.debug("Distribution of labels in train: " + str(numpy.bincount(trainY)))
logging.debug("Distribution of labels in test: " + str(numpy.bincount(testY)))
for varSplit in varSplits:
for nfcv in nfcvs:
for minSplit in minSplits:
self.setMaxDepth(maxDepths[0])
self.setVarSplit(varSplit)
self.setNfcv(nfcv)
self.setMinSplit(minSplit)
logging.debug(self)
idx = cross_val.StratifiedKFold(trainY, innerFolds)
j = 0
metrics = numpy.zeros((len(idx), maxDepths.shape[0]))
for idxtr, idxts in idx:
Util.printIteration(j, 1, innerFolds)
innerTrainX, innerTestX = trainX[idxtr, :], trainX[idxts, :]
innerTrainY, innerTestY = trainY[idxtr], trainY[idxts]
self.learnModel(innerTrainX, innerTrainY)
for k in range(maxDepths.shape[0]):
maxDepth = maxDepths[k]
robjects.globalenv["maxDepth"] = maxDepth
robjects.globalenv["tree"] = self.tree
nodeList = robjects.r('tree$nodes[tree$depth>=maxDepth]')
self.tree = self.treeRankLib.subTreeRank(self.tree, nodeList)
predY = self.predict(innerTestX)
gc.collect()
metrics[j, k] = Evaluator.auc(predY, innerTestY)
j += 1
meanAUC = numpy.mean(metrics, 0)
varAUC = numpy.var(metrics, 0)
logging.warn(self.baseLib.warnings())
logging.debug("Mean AUCs and variances at each depth " + str((meanAUC, varAUC)))
for k in range(maxDepths.shape[0]):
maxDepth = maxDepths[k]
meanParamAUCs.append(meanAUC[k])
paramList.append((maxDepth, varSplit, nfcv, minSplit))
#Try to get some memory back
gc.collect()
robjects.r('gc(verbose=TRUE)')
robjects.r('memory.profile()')
#print(self.hp.heap())
#Now choose best params
bestInd = numpy.argmax(numpy.array(meanParamAUCs))
self.setMaxDepth(paramList[bestInd][0])
self.setVarSplit(paramList[bestInd][1])
self.setNfcv(paramList[bestInd][2])
self.setMinSplit(paramList[bestInd][3])
self.learnModel(trainX, trainY)
predTrainY = self.predict(trainX)
predTestY = self.predict(testX)
bestTrainAUCs[i] = Evaluator.auc(predTrainY, trainY)
bestTestAUCs[i] = Evaluator.auc(predTestY, testY)
#Store the parameters and ROC curves
bestParams.append(paramList[bestInd])
bestTrainROCs.append(Evaluator.roc(trainY, predTrainY))
bestTestROCs.append(Evaluator.roc(testY, predTestY))
metaDict = {}
metaDict["size"] = self.getTreeSize()
metaDict["depth"] = self.getTreeDepth()
bestMetaDicts.append(metaDict)
i += 1
allMetrics = [bestTrainAUCs, bestTrainROCs, bestTestAUCs, bestTestROCs]
return (bestParams, allMetrics, bestMetaDicts)
def getTreeSize(self):
return len(self.tree[2])
def getTreeDepth(self):
return numpy.max(numpy.array(self.tree[14]))
def __str__(self):
#Just write out the parameters
outStr = "TreeRank:"
if self.leafRank == self.treeRankLib.LRCart:
outStr += " LeafRank=CART"
elif self.leafRank == self.treeRankLib.LRsvm:
outStr += " LeafRank=SVM"
elif self.leafRank == self.treeRankLib.LRforest:
outStr += " LeafRank=Random Forests"
outStr += " maxDepth=" + str(self.maxDepth)
outStr += " varSplit=" + str(self.varSplit)
outStr += " nfcv=" + str(self.nfcv)
outStr += " minSplit=" + str(self.minSplit)
return outStr
def getModel(self):
return self.tree
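# A minimal usage sketch (assumptions, not part of the original code: X is a
# 2d numpy feature array, Y a 1d binary label array, and a working R/rpy2
# setup is available, since TreeRank wraps R code):
#
#   treeRank = TreeRankR()
#   leafRank = treeRank.getTreeRankLib().LRCart
#   bestParams, allMetrics, bestMetaDicts = treeRank.evaluateCvOuter(X, Y, 5, leafRank)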
|
gpl-3.0
|
person142/scipy
|
doc/source/tutorial/examples/normdiscr_plot1.py
|
36
|
1547
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints // 2
npointsf = float(npoints)
nbound = 4 # bounds for the truncated normal
normbound = (1 + 1/npointsf) * nbound # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1) # integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
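# probs[i] is the truncated-normal probability mass falling in the bin around
# the i-th integer grid point (differences of the CDF at the bin limits)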
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) # fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
f, l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
|
bsd-3-clause
|
KaranToor/MA450
|
google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/api/appinfo.py
|
2
|
95880
|
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AppInfo tools.
This library allows you to work with AppInfo records in memory, as well as store
and load from configuration files.
"""
# WARNING: This file is externally viewable by our users. All comments from
# this file will be stripped. The docstrings will NOT. Do not put sensitive
# information in docstrings. If you must communicate internal information in
# this source file, please place them in comments only.
# Parts of the code in this file are duplicated in
# //java/com/google/apphosting/admin/legacy/...
# This is part of an ongoing effort to replace the deployment API.
# Until we can delete this code, please check to see if your changes need
# to be reflected in the java code. For questions, talk to clouser@ or
import logging
import os
import re
import string
import sys
import wsgiref.util
# pylint: disable=g-import-not-at-top
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
# This case covers both Python 2.5 and unittests, which are 2.5 only.
from googlecloudsdk.third_party.appengine.api import validation
from googlecloudsdk.third_party.appengine.api import yaml_builder
from googlecloudsdk.third_party.appengine.api import yaml_listener
from googlecloudsdk.third_party.appengine.api import yaml_object
from googlecloudsdk.third_party.appengine.api import appinfo_errors
from googlecloudsdk.third_party.appengine.api import backendinfo
# pylint: enable=g-import-not-at-top
# Regular expression for matching URL, file, URL root regular expressions.
# `url_root` is identical to `url` except that it additionally must not end
# with `*`.
# TODO(user): `url_root` should generally allow a URL but not a regex or
# glob.
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'.+'
_URL_ROOT_REGEX = r'/.*'
# Regular expression for matching cache expiration deltas.
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
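# Illustrative note (sketch, not in the original source): the grammar above
# accepts whitespace-separated deltas such as '4d 5h 30m 15s'; values like
# '10', '1d 6h', or '1h 30m' (see the URLMap docstring below) all match.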
_START_PATH = '/_ah/start'
_NON_WHITE_SPACE_REGEX = r'^\S+$'
# Regular expression for matching service names.
# TODO(arb): this may need altering so as to not leak unreleased service names
# TODO(user): Re-add sms to list of services.
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
# Regular expression for matching page names.
_PAGE_NAME_REGEX = r'^.+$'
# Constants for interpreting expiration deltas.
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
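# Illustrative note (sketch, not in the original source): with these
# conversions an expiration of '1d 6h' works out to 1*86400 + 6*3600 =
# 108000 seconds.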
# Constant values from `apphosting/base/constants.h`
# TODO(user): Maybe a python constants file.
APP_ID_MAX_LEN = 100
MODULE_ID_MAX_LEN = 63
# See b/5485871 for why this is 100 and not 63.
# NOTE(user): See b/5485871 for why this is different from the
# `apphosting/base/constants.h` value.
MODULE_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
# The character separating the partition from the domain.
PARTITION_SEPARATOR = '~'
# The character separating the domain from the display-app-id.
DOMAIN_SEPARATOR = ':'
# The character separating major and minor versions.
VERSION_SEPARATOR = '.'
# The character separating module from module version.
MODULE_SEPARATOR = ':'
# The name of the default module
DEFAULT_MODULE = 'default'
# Regular expression for ID types. Defined in apphosting/base/id_util.cc.
PARTITION_RE_STRING_WITHOUT_SEPARATOR = (r'[a-z\d\-]{1,%d}' % APP_ID_MAX_LEN)
PARTITION_RE_STRING = (r'%s\%s' %
(PARTITION_RE_STRING_WITHOUT_SEPARATOR,
PARTITION_SEPARATOR))
DOMAIN_RE_STRING_WITHOUT_SEPARATOR = (r'(?!\-)[a-z\d\-\.]{1,%d}' %
APP_ID_MAX_LEN)
DOMAIN_RE_STRING = (r'%s%s' %
(DOMAIN_RE_STRING_WITHOUT_SEPARATOR, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
# NOTE(user,user): These regexes have been copied to multiple other
# locations in google.apphosting so we don't have to pull this file into
# python_lib for other modules to work in production.
# Other known locations as of 2016-08-15:
# - java/com/google/apphosting/admin/legacy/LegacyAppInfo.java
# - apphosting/client/app_config_old.cc
# - apphosting/api/app_config/app_config_server2.cc
MODULE_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (MODULE_ID_MAX_LEN - 1)
MODULE_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(MODULE_VERSION_ID_MAX_LEN - 1))
_IDLE_INSTANCES_REGEX = r'^([\d]+|automatic)$'
# Note that this regex will not allow zero-prefixed numbers, e.g. 0001.
_INSTANCES_REGEX = r'^[1-9][\d]*$'
_INSTANCE_CLASS_REGEX = r'^([fF](1|2|4|4_1G)|[bB](1|2|4|8|4_1G))$'
_CONCURRENT_REQUESTS_REGEX = r'^([1-9]\d*)$'
# This enforces that we will only accept a single decimal point of accuracy at
# the granularity of seconds and no decimal point with a granularity of
# milliseconds.
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
GCE_RESOURCE_NAME_REGEX = r'^[a-z]([a-z\d-]{0,61}[a-z\d])?$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
# Note(user): This must match api/app_config.py
BUILTIN_NAME_PREFIX = 'ah-builtin'
RUNTIME_RE_STRING = r'[a-z][a-z0-9\-]{0,29}'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
ENV_RE_STRING = r'[\w.]{1,32}'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
DATASTORE_ID_POLICY_LEGACY = 'legacy'
DATASTORE_ID_POLICY_DEFAULT = 'default'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
# Used for missing values; see http://b/issue?id=2073962.
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
# Expression meaning to skip no files, which is the default for AppInclude.
SKIP_NO_FILES = r'(?!)'
DEFAULT_NOBUILD_FILES = (r'^$')
# Attributes for `URLMap`
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION_READABLE = 'application_readable'
REDIRECT_HTTP_RESPONSE_CODE = 'redirect_http_response_code'
# Attributes for `AppInfoExternal`
APPLICATION = 'application'
PROJECT = 'project' # An alias for 'application'
MODULE = 'module'
SERVICE = 'service'
AUTOMATIC_SCALING = 'automatic_scaling'
MANUAL_SCALING = 'manual_scaling'
BASIC_SCALING = 'basic_scaling'
VM = 'vm'
VM_SETTINGS = 'vm_settings'
BETA_SETTINGS = 'beta_settings'
VM_HEALTH_CHECK = 'vm_health_check'
HEALTH_CHECK = 'health_check'
RESOURCES = 'resources'
NETWORK = 'network'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
API_VERSION = 'api_version'
ENDPOINTS_API_SERVICE = 'endpoints_api_service'
ENV = 'env'
ENTRYPOINT = 'entrypoint'
RUNTIME_CONFIG = 'runtime_config'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
DATASTORE_AUTO_ID_POLICY = 'auto_id_policy'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
SOURCE_REPO_RE_STRING = r'^[a-z][a-z0-9\-\+\.]*:[^#]*$'
SOURCE_REVISION_RE_STRING = r'^[0-9a-fA-F]+$'
# Maximum size of all source references (in bytes) for a deployment.
SOURCE_REFERENCES_MAX_SIZE = 2048
INSTANCE_CLASS = 'instance_class'
# Attributes for Standard App Engine (only) AutomaticScaling.
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
MAXIMUM_CONCURRENT_REQUEST = 'max_concurrent_requests'
# Attributes for Managed VMs (only) AutomaticScaling. These are very
# different than Standard App Engine because scaling settings are
# mapped to Cloud Autoscaler (as opposed to the clone scheduler). See
# AutoscalingConfig in
MIN_NUM_INSTANCES = 'min_num_instances'
MAX_NUM_INSTANCES = 'max_num_instances'
COOL_DOWN_PERIOD_SEC = 'cool_down_period_sec'
CPU_UTILIZATION = 'cpu_utilization'
CPU_UTILIZATION_UTILIZATION = 'target_utilization'
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC = 'aggregation_window_length_sec'
# Managed VMs Richer Autoscaling. These (MVMs only) scaling settings
# are supported for both vm:true and env:2|flex, but are not yet
# publicly documented.
TARGET_NETWORK_SENT_BYTES_PER_SEC = 'target_network_sent_bytes_per_sec'
TARGET_NETWORK_SENT_PACKETS_PER_SEC = 'target_network_sent_packets_per_sec'
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC = 'target_network_received_bytes_per_sec'
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC = (
'target_network_received_packets_per_sec')
TARGET_DISK_WRITE_BYTES_PER_SEC = 'target_disk_write_bytes_per_sec'
TARGET_DISK_WRITE_OPS_PER_SEC = 'target_disk_write_ops_per_sec'
TARGET_DISK_READ_BYTES_PER_SEC = 'target_disk_read_bytes_per_sec'
TARGET_DISK_READ_OPS_PER_SEC = 'target_disk_read_ops_per_sec'
TARGET_REQUEST_COUNT_PER_SEC = 'target_request_count_per_sec'
TARGET_CONCURRENT_REQUESTS = 'target_concurrent_requests'
# Attributes for ManualScaling
INSTANCES = 'instances'
# Attributes for BasicScaling
MAX_INSTANCES = 'max_instances'
IDLE_TIMEOUT = 'idle_timeout'
# Attributes for AdminConsole
PAGES = 'pages'
NAME = 'name'
# Attributes for EndpointsApiService
ENDPOINTS_NAME = 'name'
CONFIG_ID = 'config_id'
# Attributes for ErrorHandlers
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
# Attributes for BuiltinHandler
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
# Attributes for `VmHealthCheck`. Please refer to message `VmHealthCheck` in
# `request_path` and `port` are not configurable yet.
ENABLE_HEALTH_CHECK = 'enable_health_check'
CHECK_INTERVAL_SEC = 'check_interval_sec'
TIMEOUT_SEC = 'timeout_sec'
UNHEALTHY_THRESHOLD = 'unhealthy_threshold'
HEALTHY_THRESHOLD = 'healthy_threshold'
RESTART_THRESHOLD = 'restart_threshold'
HOST = 'host'
# Attributes for Resources.
CPU = 'cpu'
MEMORY_GB = 'memory_gb'
DISK_SIZE_GB = 'disk_size_gb'
# Attributes for Resources:Volumes.
VOLUMES = 'volumes'
VOLUME_NAME = 'name'
VOLUME_TYPE = 'volume_type'
SIZE_GB = 'size_gb'
# Attributes for Network.
FORWARDED_PORTS = 'forwarded_ports'
INSTANCE_TAG = 'instance_tag'
NETWORK_NAME = 'name'
SUBNETWORK_NAME = 'subnetwork_name'
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
latest_version,
default_version=None,
deprecated_versions=None,
experimental_versions=None):
"""Initializer for `_VersionedLibrary`.
Args:
name: The name of the library; for example, `django`.
url: The URL for the library's project page; for example,
`http://www.djangoproject.com/`.
description: A short description of the library; for example,
`A framework...`.
supported_versions: A list of supported version names, ordered by release
date; for example, `["v1", "v2", "v3"]`.
latest_version: The version of the library that will be used when you
specify `latest.` The rule of thumb is that this value should be the
newest version that is neither deprecated nor experimental; however
this value might be an experimental version if all of the supported
versions are either deprecated or experimental.
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime, or `None` if the library is not available
by default; for example, `v1`.
deprecated_versions: A list of the versions of the library that have been
deprecated; for example, `["v1", "v2"]`.
experimental_versions: A list of the versions of the library that are
currently experimental; for example, `["v1"]`.
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.latest_version = latest_version
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
@property
def non_deprecated_versions(self):
"""Retrieves the versions of the library that are not deprecated.
Returns:
A list of the versions of the library that are not deprecated.
"""
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'clearsilver',
'http://www.clearsilver.net/',
'A fast, powerful, and language-neutral HTML template system.',
['0.10.5'],
latest_version='0.10.5',
),
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4', '1.5', '1.9'],
latest_version='1.4',
),
_VersionedLibrary(
'enum',
'https://pypi.python.org/pypi/enum34',
'A backport of the enum module introduced in python 3.4',
['0.9.23'],
latest_version='0.9.23',
),
_VersionedLibrary(
'endpoints',
'https://developers.google.com/appengine/docs/python/endpoints/',
'Libraries for building APIs in an App Engine application.',
['1.0'],
latest_version='1.0',
),
_VersionedLibrary(
'grpcio',
      'http://www.grpc.io/',
'A high performance general RPC framework',
['1.0.0'],
latest_version='1.0.0',
default_version='1.0.0',
),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6'],
latest_version='2.6',
),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3', '2.3.5'],
latest_version='2.3',
experimental_versions=['2.3.5'],
),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
'A XML/HTML/XHTML markup safe string for Python.',
['0.15', '0.23'],
latest_version='0.15',
),
_VersionedLibrary(
'matplotlib',
'http://matplotlib.org/',
'A 2D plotting library which produces publication-quality figures.',
['1.2.0'],
latest_version='1.2.0',
),
_VersionedLibrary(
'MySQLdb',
'http://mysql-python.sourceforge.net/',
'A Python DB API v2.0 compatible interface to MySQL.',
['1.2.4b4', '1.2.4', '1.2.5'],
latest_version='1.2.5',
experimental_versions=['1.2.4b4', '1.2.4', '1.2.5']
),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1'],
latest_version='1.6.1',
),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7'],
latest_version='1.1.7',
),
_VersionedLibrary(
'protorpc',
'https://code.google.com/p/google-protorpc/',
'A framework for implementing HTTP-based remote procedure call (RPC) '
'services.',
['1.0'],
latest_version='1.0',
default_version='1.0',
),
_VersionedLibrary(
'pytz',
'https://pypi.python.org/pypi/pytz?',
'A library for cross-platform timezone calculations',
['2016.4'],
latest_version='2016.4',
default_version='2016.4',
),
_VersionedLibrary(
'crcmod',
'http://crcmod.sourceforge.net/',
'A library for generating Cyclic Redundancy Checks (CRC).',
['1.7'],
latest_version='1.7',
),
_VersionedLibrary(
'PyAMF',
'http://pyamf.appspot.com/index.html',
'A library that provides (AMF) Action Message Format functionality.',
['0.6.1', '0.7.2'],
latest_version='0.6.1',
experimental_versions=['0.7.2'],
),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
'A library of cryptography functions such as random number generation.',
['2.3', '2.6', '2.6.1'],
latest_version='2.6',
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11'],
latest_version='0.6c11',
),
_VersionedLibrary(
'six',
'https://pypi.python.org/pypi/six',
'Abstract differences between py2.x and py3',
['1.9.0'],
latest_version='1.9.0',
),
_VersionedLibrary(
'ssl',
'http://docs.python.org/dev/library/ssl.html',
'The SSL socket wrapper built-in module.',
['2.7', '2.7.11'],
latest_version='2.7',
),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1', '2.5.2'],
latest_version='2.5.2',
default_version='2.3',
deprecated_versions=['2.3']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1', '1.2.3'],
latest_version='1.2.3',
default_version='1.1.1',
),
_VersionedLibrary(
'werkzeug',
'http://www.werkzeug.pocoo.org/',
'A WSGI utility library.',
['0.11.10'],
latest_version='0.11.10',
default_version='0.11.10',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
latest_version='3.10',
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
# A mapping from third-party name/version to a list of that library's
# dependencies.
REQUIRED_LIBRARIES = {
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
('matplotlib', '1.2.0'): [('numpy', '1.6.1')],
('matplotlib', 'latest'): [('numpy', 'latest')],
}
_USE_VERSION_FORMAT = ('use one of: "%s"')
# See RFC 2616 section 2.2.
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
# Source: http://www.cs.tut.fi/~jkorpela/http.html
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
# The minimum cookie length (i.e. number of bytes) that HTTP clients should
# support, per RFCs 2109 and 2965.
_MAX_COOKIE_LENGTH = 4096
# The maximum URL length; the enforced limit includes a trailing NULL
# character, which is why this is not 2048.
_MAX_URL_LENGTH = 2047
# We allow certain headers to be larger than the normal limit of 8192 bytes.
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS = 10240
_CANNED_RUNTIMES = ('contrib-dart', 'dart', 'go', 'php', 'php55', 'python',
'python27', 'python-compat', 'java', 'java7', 'vm',
'custom', 'nodejs', 'ruby')
_all_runtimes = _CANNED_RUNTIMES
def GetAllRuntimes():
"""Returns the list of all valid runtimes.
This list can include third-party runtimes as well as canned runtimes.
Returns:
Tuple of strings.
"""
return _all_runtimes
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
# Common fields.
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
# Python/CGI fields.
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
"""A dict that limits keys and values to what `http_headers` allows.
  `http_headers` is a static handler key; it applies to handlers with
`static_dir` or `static_files` keys. The following code is an example of how
`http_headers` is used::
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
# TODO(user): I don't think there's any reason to disallow users
# from setting Content-Encoding, but other parts of the system prevent
# this; therefore, we disallow it here. See the following discussion:
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-webkit-csp': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'content-security-policy-report-only':
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
class KeyValidator(validation.Validator):
"""Ensures that keys in `HttpHeadersDict` are valid.
`HttpHeadersDict` contains a list of headers. An instance is used as
`HttpHeadersDict`'s `KEY_VALIDATOR`.
"""
def Validate(self, name, unused_key=None):
"""Returns an argument, or raises an exception if the argument is invalid.
HTTP header names are defined by `RFC 2616, section 4.2`_.
Args:
name: HTTP header field value.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: An argument cannot be used as an
HTTP header name.
.. _RFC 2616, section 4.2:
https://www.ietf.org/rfc/rfc2616.txt
"""
original_name = name
# Make sure only ASCII data is used.
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header values must not contain non-ASCII data')
# HTTP headers are case-insensitive.
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
# Request headers shouldn't be used in responses.
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
# Make sure that none of the reserved prefixes is used.
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
            'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
class ValueValidator(validation.Validator):
"""Ensures that values in `HttpHeadersDict` are valid.
An instance is used as `HttpHeadersDict`'s `VALUE_VALIDATOR`.
"""
def Validate(self, value, key=None):
"""Returns a value, or raises an exception if the value is invalid.
According to `RFC 2616 section 4.2`_ header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string"::
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
A value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: An argument cannot be used as an
HTTP header value.
.. _RFC 2616, section 4.2:
https://www.ietf.org/rfc/rfc2616.txt
"""
# Make sure only ASCII data is used.
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data')
# HTTP headers are case-insensitive.
key = key.lower()
# TODO(user): This is the same check that appserver performs, but it
# could be stronger. e.g. `"foo` should not be considered valid, because
# HTTP does not allow unclosed double quote marks in header values, per
# RFC 2616 section 4.2.
printable = set(string.printable[:-5])
if not all(char in printable for char in value):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
# The `>=` operator here is a little counter-intuitive. The reason for it
# is that I'm trying to follow the
# `HTTPProto::IsValidHeader` implementation.
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
# If execution reaches this point, it generally means the header is too
# long, but there are a few exceptions, which are listed in the next
# dict.
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
# We are dealing with one of the exceptional headers with larger maximum
# value lengths.
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
            '%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to `header_name`. If more than one such
value is in `self`, one of the values is selected arbitrarily and
returned. The selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
# TODO(user): Perhaps, this functionality should be part of
# `validation.ValidatedDict`.
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
class URLMap(HandlerBase):
"""Maps from URLs to handlers.
This class acts similar to a union type. Its purpose is to describe a mapping
between a set of URLs and their handlers. The handler type of a given instance
is determined by which `handler-id` attribute is used.
Every mapping can have one and only one handler type. Attempting to use more
than one `handler-id` attribute will cause an `UnknownHandlerType` to be
raised during validation. Failure to provide any `handler-id` attributes will
cause `MissingHandlerType` to be raised during validation.
The regular expression used by the `url` field will be used to match against
the entire URL path and query string of the request; therefore, partial maps
will not be matched. Specifying a `url`, such as `/admin`, is the same as
matching against the regular expression `^/admin$`. Don't start your matching
  `url` with `^` or end it with `$`. These regular expressions won't be
accepted and will raise `ValueError`.
Attributes:
login: Specifies whether a user should be logged in to access a URL.
The default value of this argument is `optional`.
secure: Sets the restriction on the protocol that can be used to serve this
URL or handler. This value can be set to `HTTP`, `HTTPS` or `either`.
url: Specifies a regular expression that is used to fully match against the
request URLs path. See the "Special cases" section of this document to
learn more.
static_files: Specifies the handler ID attribute that maps `url` to the
appropriate file. You can specify regular expression backreferences to
the string matched to `url`.
upload: Specifies the regular expression that is used by the application
configuration program to determine which files are uploaded as blobs.
Because it is difficult to determine this information using just the
`url` and `static_files` arguments, this attribute must be included.
This attribute is required when you define a `static_files` mapping. A
matching file name must fully match against the `upload` regular
expression, similar to how `url` is matched against the request path. Do
not begin the `upload` argument with the `^` character or end it with
the `$` character.
static_dir: Specifies the handler ID that maps the provided `url` to a
sub-directory within the application directory. See "Special cases."
mime_type: When used with `static_files` and `static_dir`, this argument
specifies that the MIME type of the files that are served from those
directories must be overridden with this value.
script: Specifies the handler ID that maps URLs to a script handler within
the application directory that will run using CGI.
position: Used in `AppInclude` objects to specify whether a handler should
be inserted at the beginning of the primary handler list or at the end.
If `tail` is specified, the handler is inserted at the end; otherwise,
the handler is inserted at the beginning. This behavior implies that
`head` is the effective default.
expiration: When used with static files and directories, this argument
specifies the time delta to use for cache expiration. This argument
should use the following format: `4d 5h 30m 15s`, where each letter
signifies days, hours, minutes, and seconds, respectively. The `s` for
"seconds" can be omitted. Only one amount must be specified, though
combining multiple amounts is optional. The following list contains
examples of values that are acceptable: `10`, `1d 6h`, `1h 30m`,
`7d 7d 7d`, `5m 30`.
api_endpoint: Specifies the handler ID that identifies an endpoint as an API
endpoint. Calls that terminate here will be handled by the API serving
framework.
Special cases:
When defining a `static_dir` handler, do not use a regular expression in the
`url` attribute. Both the `url` and `static_dir` attributes are
automatically mapped to these equivalents::
<url>/(.*)
<static_dir>/\1
For example, this declaration...::
url: /images
static_dir: images_folder
...is equivalent to this `static_files` declaration::
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
# Static file fields.
# File mappings are allowed to have regex back references.
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
APPLICATION_READABLE: validation.Optional(bool),
# Static directory fields.
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
# Used in both static mappings.
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
# Python/CGI fields.
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
REDIRECT_HTTP_RESPONSE_CODE: validation.Optional(validation.Options(
'301', '302', '303', '307')),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([
URL, LOGIN, AUTH_FAIL_ACTION, SECURE, REDIRECT_HTTP_RESPONSE_CODE])
# The keys of this map are attributes which can be used to identify each
# mapping type in addition to the handler identifying attribute itself.
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS,
APPLICATION_READABLE),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS, APPLICATION_READABLE),
HANDLER_SCRIPT: (POSITION,),
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Gets the handler for a mapping.
Returns:
The value of the handler, as determined by the handler ID attribute.
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Gets the handler type of a mapping.
Returns:
The handler type as determined by which handler ID attribute is set.
Raises:
UnknownHandlerType: If none of the handler ID attributes are set.
UnexpectedHandlerAttribute: If an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: If the handler is missing a required
attribute for its handler type.
MissingHandlerAttribute: If a URL handler is missing an attribute.
"""
# Special case for the `api_endpoint` handler as it may have a `script`
# attribute as well.
if getattr(self, HANDLER_API_ENDPOINT) is not None:
# The api_endpoint id attribute is set; use it directly.
mapping_type = HANDLER_API_ENDPOINT
else:
for id_field in URLMap.ALLOWED_FIELDS.iterkeys():
# Attributes always exist as defined by ATTRIBUTES.
if getattr(self, id_field) is not None:
# Matched id attribute, break out of loop.
mapping_type = id_field
break
else:
# If no mapping type is found raise exception.
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
# Make sure that none of the set attributes on this handler
# are not allowed for the discovered handler type.
for attribute in self.ATTRIBUTES.iterkeys():
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
# Also check that static file map has 'upload'.
# NOTE: Add REQUIRED_FIELDS along with ALLOWED_FIELDS if any more
# exceptional cases arise.
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
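# Illustrative sketch (not part of the original module; keyword construction of
# URLMap is assumed): the handler type is simply the one handler-identifying
# attribute that was set.
#
#   m = URLMap(url='/images/(.*)',
#              static_files='images_folder/\\1',
#              upload='images_folder/(.*)')
#   m.GetHandlerType()   # -> 'static_files' (HANDLER_STATIC_FILES)
#   m.GetHandler()       # -> 'images_folder/\\1'
#
# Setting an attribute outside ALLOWED_FIELDS/COMMON_FIELDS for that type, such
# as `position` on a static_files mapping, raises UnexpectedHandlerAttribute.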
def CheckInitialized(self):
"""Adds additional checking to make sure a handler has correct fields.
In addition to normal `ValidatedCheck`, this method calls `GetHandlerType`,
which validates whether all of the handler fields are configured properly.
Raises:
UnknownHandlerType: If none of the handler ID attributes are set.
UnexpectedHandlerAttribute: If an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: If the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: If `mime_type` is inconsistent with
`http_headers`.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
# re how headers that affect caching interact per RFC 2616:
#
# Section 13.1.3 says that when there is "apparent conflict between
# [Cache-Control] header values, the most restrictive interpretation is
# applied".
#
# Section 14.21 says that Cache-Control: max-age overrides Expires
# headers.
#
# Section 14.32 says that Pragma: no-cache has no meaning in responses;
# therefore, we do not need to be concerned about that header here.
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that `self.http_headers` is consistent with `self.mime_type`.
This method assumes that `self` is a static handler, either
`self.static_dir` or `self.static_files`. You cannot specify `None`.
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: If `self.http_headers`
contains a `Content-Type` header, and `self.mime_type` is set. For
example, the following configuration would be rejected::
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
`http_headers` and `mime_type` specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
'http_header specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
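# Illustrative sketch (not part of the original module): either field alone is
# accepted; only the combination of `mime_type` with a `Content-Type` entry in
# `http_headers` is rejected. For example, this configuration passes::
#   - url: /static
#     static_dir: static
#     http_headers:
#       content-type: text/html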
def FixSecureDefaults(self):
"""Forces omitted `secure` handler fields to be set to 'secure: optional'.
The effect is that `handler.secure` is never equal to the nominal default.
"""
# See http://b/issue?id=2073962.
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See the `version element documentation`_ to learn which URLs are reserved.
.. _`version element documentation`:
https://cloud.google.com/appengine/docs/python/config/appref#syntax
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
Raises:
PositionUsedInAppYamlHandler: If the `position` attribute is specified for
an `app.yaml` file instead of an `include.yaml` file.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
class AdminConsolePage(validation.Validated):
"""Class representing the admin console page in an `AdminConsole` object."""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing an admin console directives in application info."""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Returns the result of merging two `AdminConsole` objects."""
# Right now this method only needs to worry about the pages attribute of
# `AdminConsole`. However, since this object is valid as part of an
# `AppInclude` object, any objects added to `AdminConsole` in the future
# must also be merged. Rather than burying the merge logic in the process
# of merging two `AppInclude` objects, it is centralized here. If you modify
# the `AdminConsole` object to support other objects, you must also modify
# this method to support merging those additional objects.
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info."""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
class BuiltinHandler(validation.Validated):
"""Class representing built-in handler directives in application info.
This class permits arbitrary keys, but their values must be described by the
`validation.Options` object that is returned by `ATTRIBUTES`.
"""
# `Validated` is a somewhat complicated class. It actually maintains two
# dictionaries: the `ATTRIBUTES` dictionary and an internal `__dict__` object
# that maintains key value pairs.
#
# The normal flow is that a key must exist in `ATTRIBUTES` in order to be able
# to be inserted into `__dict__`. So that's why we force the
# `ATTRIBUTES.__contains__` method to always return `True`; we want to accept
# any attribute. Once the method returns `True`, then its value will be
# fetched, which returns `ATTRIBUTES[key]`; that's why we override
# `ATTRIBUTES.__getitem__` to return the validator for a `BuiltinHandler`
# object.
#
# This is where it gets tricky. Once the validator object is returned, then
# `__dict__[key]` is set to the validated object for that key. However, when
# `CheckInitialized()` is called, it uses iteritems from `ATTRIBUTES` in order
# to generate a list of keys to validate. This expects the `BuiltinHandler`
# instance to contain every item in `ATTRIBUTES`, which contains every
# built-in name seen so far by any `BuiltinHandler`. To work around this,
# `__getattr__` always returns `None` for public attribute names. Note that
# `__getattr__` is only called if `__dict__` does not contain the key. Thus,
# only the single built-in value that was actually set gets validated.
#
# What's important to know is that in this implementation, only the keys in
# `ATTRIBUTES` matter, and only the values in `__dict__` matter. The values in
# `ATTRIBUTES` and the keys in `__dict__` are both ignored. The key in
# `__dict__` is only used for the `__getattr__` function, but to find out what
# keys are available, only `ATTRIBUTES` is ever read.
class DynamicAttributes(dict):
"""Provides a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any `get` operation. The fixed
value that you pass in as a constructor parameter should be a
`validation.Validated` object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensures all BuiltinHandler objects at least use the `default` attribute.
Args:
**attributes: The attributes that you want to use.
"""
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Allows `ATTRIBUTES.iteritems()` to return set of items that have values.
Whenever `validate` calls `iteritems()`, it is always called on
`ATTRIBUTES`, not on `__dict__`, so this override is important to ensure
that functions such as `ToYAML()` return the correct set of keys.
Args:
key: The key for the `iteritem` that you want to set.
value: The value for the `iteritem` that you want to set.
Raises:
MultipleBuiltinsSpecified: If more than one built-in is defined in a list
element.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
# Only the name of a built-in handler is currently allowed as an attribute
# so the object can only be set once. If later attributes are desired of
# a different form, this clause should be used to catch whenever more than
# one object does not match a predefined attribute name.
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def __getattr__(self, key):
if key.startswith('_'):
# `__getattr__` is only called for attributes that don't exist in the
# instance dictionary.
raise AttributeError
return None
def ToDict(self):
"""Converts a `BuiltinHander` object to a dictionary.
Returns:
A dictionary in `{builtin_handler_name: on/off}` form
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
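# Illustrative sketch (not part of the original module; keyword construction is
# assumed): a YAML builtins entry such as `- datastore_admin: on` becomes a
# BuiltinHandler whose single dynamic attribute names the builtin.
#
#   b = BuiltinHandler(datastore_admin='on')
#   b.builtin_name   # -> 'datastore_admin'
#   b.ToDict()       # -> {'datastore_admin': 'on'}
#
# Assigning a second builtin name to the same object raises
# MultipleBuiltinsSpecified, which is why each YAML list element must be
# prefixed by "-".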
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Finds if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: A list of `BuiltinHandler` objects, typically
`yaml.builtins`.
builtin_name: The name of the built-in that you want to determine whether
it is defined.
Returns:
`True` if `builtin_name` is defined by a member of `builtins_list`; all
other results return `False`.
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of `BuiltinHandler` objects.
Args:
builtins_list: A list of `BuiltinHandler` objects to convert to tuples.
Returns:
A list of `(name, status)` that is derived from the `BuiltinHandler`
objects.
"""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verifies that all `BuiltinHandler` objects are valid and not repeated.
Args:
builtins_list: A list of `BuiltinHandler` objects to validate.
runtime: If you specify this argument, warnings are generated for
built-ins that have been deprecated in the given runtime.
Raises:
InvalidBuiltinFormat: If the name of a `BuiltinHandler` object cannot be
determined.
DuplicateBuiltinsSpecified: If a `BuiltinHandler` name is used more than
once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
# This checking must be done here rather than in `apphosting/ext/builtins`
# because `apphosting/ext/builtins` cannot differentiate between
# built-ins specified in `app.yaml` versus ones added in a built-in
# include. There is a hole here where warnings are not generated for
# deprecated built-ins that appear in user-created include files.
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
class ApiConfigHandler(HandlerBase):
"""Class representing `api_config` handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
# Make `URL` and `SCRIPT` required for `api_config` stanza
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Determines if the library configuration is not valid.
Raises:
appinfo_errors.InvalidLibraryName: If the specified library is not
supported.
appinfo_errors.InvalidLibraryVersion: If the specified library version is
not supported.
"""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version == 'latest':
self.version = supported_library.latest_version
elif self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
use_vers = '", "'.join(supported_library.non_deprecated_versions)
logging.warning(
'%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT,
self.name,
self.version,
use_vers)
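# Illustrative sketch (not part of the original module; 'django' stands in for
# any supported library name): a version pinned to 'latest' is rewritten to the
# newest supported version, while unknown names or versions raise
# InvalidLibraryName or InvalidLibraryVersion respectively.
#
#   lib = Library(name='django', version='latest')
#   lib.CheckInitialized()   # lib.version now holds the latest supported version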
class CpuUtilization(validation.Validated):
"""Class representing the configuration of VM CPU utilization."""
ATTRIBUTES = {
CPU_UTILIZATION_UTILIZATION: validation.Optional(
validation.Range(1e-6, 1.0, float)),
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC: validation.Optional(
validation.Range(1, sys.maxint)),
}
class EndpointsApiService(validation.Validated):
"""Class representing EndpointsApiService in AppInfoExternal."""
ATTRIBUTES = {
ENDPOINTS_NAME: validation.Regex(_NON_WHITE_SPACE_REGEX),
CONFIG_ID: validation.Regex(_NON_WHITE_SPACE_REGEX),
}
class AutomaticScaling(validation.Validated):
"""Class representing automatic scaling settings in AppInfoExternal."""
ATTRIBUTES = {
MINIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_CONCURRENT_REQUEST: validation.Optional(
_CONCURRENT_REQUESTS_REGEX),
# Attributes for VM-based AutomaticScaling.
MIN_NUM_INSTANCES: validation.Optional(validation.Range(1, sys.maxint)),
MAX_NUM_INSTANCES: validation.Optional(validation.Range(1, sys.maxint)),
COOL_DOWN_PERIOD_SEC: validation.Optional(
validation.Range(60, sys.maxint, int)),
CPU_UTILIZATION: validation.Optional(CpuUtilization),
TARGET_NETWORK_SENT_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_SENT_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_WRITE_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_WRITE_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_READ_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_READ_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_REQUEST_COUNT_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_CONCURRENT_REQUESTS:
validation.Optional(validation.Range(1, sys.maxint)),
}
class ManualScaling(validation.Validated):
"""Class representing manual scaling settings in AppInfoExternal."""
ATTRIBUTES = {
INSTANCES: validation.Regex(_INSTANCES_REGEX),
}
class BasicScaling(validation.Validated):
"""Class representing basic scaling settings in AppInfoExternal."""
ATTRIBUTES = {
MAX_INSTANCES: validation.Regex(_INSTANCES_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
}
class RuntimeConfig(validation.ValidatedDict):
"""Class for "vanilla" runtime configuration.
Fields used vary by runtime, so validation is delegated to the per-runtime
build processes.
These are intended to be used during Dockerfile generation, not after VM boot.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
The settings are not further validated here. The settings are validated on
the server side.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, vm_settings_one, vm_settings_two):
"""Merges two `VmSettings` instances.
If a variable is specified by both instances, the value from
`vm_settings_one` is used.
Args:
vm_settings_one: The first `VmSettings` instance, or `None`.
vm_settings_two: The second `VmSettings` instance, or `None`.
Returns:
The merged `VmSettings` instance, or `None` if both input instances are
`None` or empty.
"""
# Note that `VmSettings.copy()` results in a dict.
result_vm_settings = (vm_settings_two or {}).copy()
# TODO(user): Apply merge logic when feature is fully defined.
# For now, we will merge the two dict and `vm_settings_one` will win
# if key collides.
result_vm_settings.update(vm_settings_one or {})
return VmSettings(**result_vm_settings) if result_vm_settings else None
class BetaSettings(VmSettings):
"""Class for Beta (internal or unreleased) settings.
This class is meant to replace `VmSettings` eventually.
Note:
All new beta settings must be registered in `shared_constants.py`.
These settings are not validated further here. The settings are validated on
the server side.
"""
@classmethod
def Merge(cls, beta_settings_one, beta_settings_two):
"""Merges two `BetaSettings` instances.
Args:
beta_settings_one: The first `BetaSettings` instance, or `None`.
beta_settings_two: The second `BetaSettings` instance, or `None`.
Returns:
The merged `BetaSettings` instance, or `None` if both input instances are
`None` or empty.
"""
merged = VmSettings.Merge(beta_settings_one, beta_settings_two)
return BetaSettings(**merged.ToDict()) if merged else None
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key/value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, env_variables_one, env_variables_two):
"""Merges two `EnvironmentVariables` instances.
If a variable is specified by both instances, the value from
`env_variables_two` is used.
Args:
env_variables_one: The first `EnvironmentVariables` instance or `None`.
env_variables_two: The second `EnvironmentVariables` instance or `None`.
Returns:
The merged `EnvironmentVariables` instance, or `None` if both input
instances are `None` or empty.
"""
# Note that `EnvironmentVariables.copy()` results in a dict.
result_env_variables = (env_variables_one or {}).copy()
result_env_variables.update(env_variables_two or {})
return (EnvironmentVariables(**result_env_variables)
if result_env_variables else None)
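# Illustrative sketch (not part of the original module; keyword construction is
# assumed): values from the second instance win on key collisions, mirroring
# dict.update() semantics.
#
#   base = EnvironmentVariables(DEBUG='0', LANG='C')
#   override = EnvironmentVariables(DEBUG='1')
#   EnvironmentVariables.Merge(base, override)   # -> {'DEBUG': '1', 'LANG': 'C'}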
def ValidateSourceReference(ref):
"""Determines if a source reference is valid.
Args:
ref: A source reference in the following format:
`[repository_uri#]revision`.
Raises:
ValidationError: If the reference is malformed.
"""
repo_revision = ref.split('#', 1)
revision_id = repo_revision[-1]
if not re.match(SOURCE_REVISION_RE_STRING, revision_id):
raise validation.ValidationError('Bad revision identifier: %s' %
revision_id)
if len(repo_revision) == 2:
uri = repo_revision[0]
if not re.match(SOURCE_REPO_RE_STRING, uri):
raise validation.ValidationError('Bad repository URI: %s' % uri)
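# Illustrative sketch (not part of the original module; the exact regular
# expressions are defined elsewhere in this file and the URI below is only an
# example): a reference is either a bare revision or `repository_uri#revision`.
#
#   ValidateSourceReference('0123abcd')                               # revision only
#   ValidateSourceReference('https://example.com/repo.git#0123abcd')  # uri#revision
#   # A revision that fails SOURCE_REVISION_RE_STRING raises ValidationError.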
def ValidateCombinedSourceReferencesString(source_refs):
"""Determines if `source_refs` contains a valid list of source references.
Args:
source_refs: A multi-line string containing one source reference per line.
Raises:
ValidationError: If the reference is malformed.
"""
if len(source_refs) > SOURCE_REFERENCES_MAX_SIZE:
raise validation.ValidationError(
'Total source reference(s) size exceeds the limit: %d > %d' % (
len(source_refs), SOURCE_REFERENCES_MAX_SIZE))
for ref in source_refs.splitlines():
ValidateSourceReference(ref.strip())
class HealthCheck(validation.Validated):
"""Class representing the health check configuration."""
ATTRIBUTES = {
ENABLE_HEALTH_CHECK: validation.Optional(validation.TYPE_BOOL),
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxint)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxint)),
UNHEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
RESTART_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HOST: validation.Optional(validation.TYPE_STR)}
class VmHealthCheck(HealthCheck):
"""Class representing the configuration of the VM health check.
Note:
This class is deprecated and will be removed in a future release. Use
`HealthCheck` instead.
"""
pass
class Volume(validation.Validated):
"""Class representing the configuration of a volume."""
ATTRIBUTES = {
VOLUME_NAME: validation.TYPE_STR,
SIZE_GB: validation.TYPE_FLOAT,
VOLUME_TYPE: validation.TYPE_STR,
}
class Resources(validation.Validated):
"""Class representing the configuration of VM resources."""
ATTRIBUTES = {
CPU: validation.Optional(validation.TYPE_FLOAT),
MEMORY_GB: validation.Optional(validation.TYPE_FLOAT),
DISK_SIZE_GB: validation.Optional(validation.TYPE_INT),
VOLUMES: validation.Optional(validation.Repeated(Volume))
}
class Network(validation.Validated):
"""Class representing the VM network configuration."""
ATTRIBUTES = {
# A list of port mappings in the form 'port' or 'external:internal'.
FORWARDED_PORTS: validation.Optional(validation.Repeated(validation.Regex(
'[0-9]+(:[0-9]+)?(/(udp|tcp))?'))),
INSTANCE_TAG: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
NETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
SUBNETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
}
class AppInclude(validation.Validated):
"""Class representing the contents of an included `app.yaml` file.
This class is used for both `builtins` and `includes` directives.
"""
# TODO(user): It probably makes sense to have a scheme where we do a
# deep-copy of fields from `AppInfoExternal` when setting the `ATTRIBUTES`
# here. Right now it's just copypasta.
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
MANUAL_SCALING: validation.Optional(ManualScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BETA_SETTINGS: validation.Optional(BetaSettings),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
SKIP_FILES: validation.RegexStr(default=SKIP_NO_FILES),
# TODO(user): add `LIBRARIES` here when we have a good story for
# handling contradictory library requests.
}
@classmethod
def MergeManualScaling(cls, appinclude_one, appinclude_two):
"""Takes the greater of `<manual_scaling.instances>` from the arguments.
`appinclude_one` is mutated to be the merged result in this process.
Also, this function must be updated if `ManualScaling` gets additional
fields.
Args:
appinclude_one: The first object to merge. The object must have a
`manual_scaling` field that contains a `ManualScaling()`.
appinclude_two: The second object to merge. The object must have a
`manual_scaling` field that contains a `ManualScaling()`.
Returns:
An object that is the result of merging
`appinclude_one.manual_scaling.instances` and
`appinclude_two.manual_scaling.instances`; this is returned as a revised
`appinclude_one` object after the mutations are complete.
"""
def _Instances(appinclude):
"""Determines the number of `manual_scaling.instances` sets.
Args:
appinclude: The include for which you want to determine the number of
`manual_scaling.instances` sets.
Returns:
The number of instances as an integer, or `None`.
"""
if appinclude.manual_scaling:
if appinclude.manual_scaling.instances:
return int(appinclude.manual_scaling.instances)
return None
# We only want to mutate a param if at least one of the given
# arguments has manual_scaling.instances set.
instances = max(_Instances(appinclude_one), _Instances(appinclude_two))
if instances is not None:
appinclude_one.manual_scaling = ManualScaling(instances=str(instances))
return appinclude_one
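# Illustrative sketch (not part of the original module): given two AppInclude
# objects `a` and `b`, the larger instance count wins and is written back onto
# the first argument.
#
#   a.manual_scaling = ManualScaling(instances='3')
#   b.manual_scaling = ManualScaling(instances='5')
#   AppInclude.MergeManualScaling(a, b).manual_scaling.instances   # -> '5'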
@classmethod
def _CommonMergeOps(cls, one, two):
"""This function performs common merge operations.
Args:
one: The first object that you want to merge.
two: The second object that you want to merge.
Returns:
An updated `one` object containing all merged data.
"""
# Merge `ManualScaling`.
AppInclude.MergeManualScaling(one, two)
# Merge `AdminConsole` objects.
one.admin_console = AdminConsole.Merge(one.admin_console,
two.admin_console)
# Preserve the specific value of `one.vm` (`None` or `False`) when neither
# is `True`.
one.vm = two.vm or one.vm
# Merge `VmSettings` objects.
one.vm_settings = VmSettings.Merge(one.vm_settings,
two.vm_settings)
# Merge `BetaSettings` objects.
if hasattr(one, 'beta_settings'):
one.beta_settings = BetaSettings.Merge(one.beta_settings,
two.beta_settings)
# Merge `EnvironmentVariables` objects. The values in `two.env_variables`
# override the ones in `one.env_variables` in case of conflict.
one.env_variables = EnvironmentVariables.Merge(one.env_variables,
two.env_variables)
one.skip_files = cls.MergeSkipFiles(one.skip_files, two.skip_files)
return one
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""Merges an `app.yaml` file with referenced builtins/includes.
Args:
appyaml: The `app.yaml` file that you want to update with `appinclude`.
appinclude: The includes that you want to merge into `appyaml`.
Returns:
An updated `app.yaml` file that includes the directives you specified in
`appinclude`.
"""
# All merge operations should occur in this function or in functions
# referenced from this one. That makes it much easier to understand what
# goes wrong when included files are not merged correctly.
if not appinclude:
return appyaml
# Merge handlers while paying attention to `position` attribute.
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
# Get rid of the `position` attribute since we no longer need it, and it is
# technically invalid to include in the resulting merged `app.yaml` file
# that will be sent when deploying the application.
h.position = None
appyaml.handlers.extend(tail)
appyaml = cls._CommonMergeOps(appyaml, appinclude)
appyaml.NormalizeVmSettings()
return appyaml
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""Merges the non-referential state of the provided `AppInclude`.
That is, `builtins` and `includes` directives are not preserved, but any
static objects are copied into an aggregate `AppInclude` object that
preserves the directives of both provided `AppInclude` objects.
`appinclude_one` is updated to be the merged result in this process.
Args:
appinclude_one: First `AppInclude` to merge.
appinclude_two: Second `AppInclude` to merge.
Returns:
`AppInclude` object that is the result of merging the static directives of
`appinclude_one` and `appinclude_two`. An updated version of
`appinclude_one` is returned.
"""
# If one or both `appinclude` objects were `None`, return the object that
# was not `None` or return `None`.
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
# Now, both `appincludes` are non-`None`.
# Merge handlers.
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
return cls._CommonMergeOps(appinclude_one, appinclude_two)
@staticmethod
def MergeSkipFiles(skip_files_one, skip_files_two):
"""Merges two `skip_files` directives.
Args:
skip_files_one: The first `skip_files` element that you want to merge.
skip_files_two: The second `skip_files` element that you want to merge.
Returns:
A list of regular expressions that are merged.
"""
if skip_files_one == SKIP_NO_FILES:
return skip_files_two
if skip_files_two == SKIP_NO_FILES:
return skip_files_one
# We exploit the handling of RegexStr where regex properties can be
# specified as a list of regexes that are then joined with |.
return validation.RegexStr().Validate(
[skip_files_one, skip_files_two], SKIP_FILES)
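# Illustrative sketch (not part of the original module; the exact object
# returned by RegexStr().Validate() is assumed to behave like a combined
# pattern):
#
#   AppInclude.MergeSkipFiles(r'^a/.*$', SKIP_NO_FILES)   # -> '^a/.*$'
#   AppInclude.MergeSkipFiles(r'^a/.*$', r'^b/.*$')       # -> pattern matching either regex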
class AppInfoExternal(validation.Validated):
"""Class representing users application info.
This class is passed to a `yaml_object` builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language. For example,
you could specify `php-quercus` if this is a Java app that was generated
from PHP source using Quercus.
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific `expiration` set.
See the documentation for the `URLMap.expiration` field for more
information.
skip_files: A regular expression object. Files that match this regular
expression will not be uploaded by `appcfg.py`. For example::
skip_files: |
.svn.*|
#.*#
nobuild_files: A regular expression object. Files that match this regular
expression will not be built into the app. This directive is valid for
Go only.
api_config: URL root and script or servlet path for enhanced API serving.
"""
ATTRIBUTES = {
# Regular expressions for these attributes are defined in
# //apphosting/base/id_util.cc.
APPLICATION: validation.Optional(APPLICATION_RE_STRING),
# An alias for `APPLICATION`.
PROJECT: validation.Optional(APPLICATION_RE_STRING),
MODULE: validation.Optional(MODULE_ID_RE_STRING),
# `service` will replace `module` soon
SERVICE: validation.Optional(MODULE_ID_RE_STRING),
VERSION: validation.Optional(MODULE_VERSION_ID_RE_STRING),
RUNTIME: validation.Optional(RUNTIME_RE_STRING),
# A new `api_version` requires a release of the `dev_appserver`, so it
# is ok to hardcode the version names here.
API_VERSION: validation.Optional(API_VERSION_RE_STRING),
# The App Engine environment to run this version in. (VM vs. non-VM, etc.)
ENV: validation.Optional(ENV_RE_STRING),
ENDPOINTS_API_SERVICE: validation.Optional(EndpointsApiService),
# The SDK will use this for generated Dockerfiles
ENTRYPOINT: validation.Optional(validation.Type(str)),
RUNTIME_CONFIG: validation.Optional(RuntimeConfig),
INSTANCE_CLASS: validation.Optional(_INSTANCE_CLASS_REGEX),
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
AUTOMATIC_SCALING: validation.Optional(AutomaticScaling),
MANUAL_SCALING: validation.Optional(ManualScaling),
BASIC_SCALING: validation.Optional(BasicScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings), # Deprecated
BETA_SETTINGS: validation.Optional(BetaSettings),
VM_HEALTH_CHECK: validation.Optional(VmHealthCheck), # Deprecated
HEALTH_CHECK: validation.Optional(HealthCheck),
RESOURCES: validation.Optional(Resources),
NETWORK: validation.Optional(Network),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
# TODO(arb): change to a regex when `validation.Repeated` supports it
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
DATASTORE_AUTO_ID_POLICY: validation.Optional(
validation.Options(DATASTORE_ID_POLICY_LEGACY,
DATASTORE_ID_POLICY_DEFAULT)),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
}
def CheckInitialized(self):
"""Performs non-regular expression-based validation.
The following are verified:
- At least one URL mapping is provided in the URL mappers.
- The number of URL mappers doesn't exceed `MAX_URL_MAPS`.
- The major version does not contain the string `-dot-`.
- If `api_endpoints` are defined, an `api_config` stanza must be
defined.
- If the `runtime` is `python27` and `threadsafe` is set, then no CGI
handlers can be used.
- The version name doesn't start with `BUILTIN_NAME_PREFIX`.
- If `redirect_http_response_code` exists, it is in the list of valid
300s.
- Module and service aren't both set. Services were formerly known as
modules.
Raises:
DuplicateLibrary: If `library_name` is specified more than once.
MissingURLMapping: If no `URLMap` object is present in the object.
TooManyURLMappings: If there are too many `URLMap` entries.
MissingApiConfig: If `api_endpoints` exists without an `api_config`.
MissingThreadsafe: If `threadsafe` is not set but the runtime requires it.
ThreadsafeWithCgiHandler: If the `runtime` is `python27`, `threadsafe` is
set and CGI handlers are specified.
TooManyScalingSettingsError: If more than one scaling settings block is
present.
RuntimeDoesNotSupportLibraries: If the libraries clause is used for a
runtime that does not support it, such as `python25`.
ModuleAndServiceDefined: If both `module` and `service` keywords are used.
Services were formerly known as modules.
"""
super(AppInfoExternal, self).CheckInitialized()
if self.runtime is None and not self.IsVm():
raise appinfo_errors.MissingRuntimeError(
'You must specify a "runtime" field for non-vm applications.')
elif self.runtime is None:
# Default an omitted runtime to 'custom' (we don't do that in ATTRIBUTES just so
# we know that it's been defaulted)
self.runtime = 'custom'
if (not self.handlers and not self.builtins and not self.includes
and not self.IsVm()):
raise appinfo_errors.MissingURLMapping(
'No URLMap entries found in application configuration')
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
if self.service and self.module:
raise appinfo_errors.ModuleAndServiceDefined(
'Cannot define both "module" and "service" in configuration')
vm_runtime_python27 = (
self.runtime == 'vm' and
(hasattr(self, 'vm_settings') and
self.vm_settings and
self.vm_settings.get('vm_runtime') == 'python27') or
(hasattr(self, 'beta_settings') and
self.beta_settings and
self.beta_settings.get('vm_runtime') == 'python27'))
if (self.threadsafe is None and
(self.runtime == 'python27' or vm_runtime_python27)):
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to a true or false YAML value')
if self.auto_id_policy == DATASTORE_ID_POLICY_LEGACY:
datastore_auto_ids_url = ('http://developers.google.com/'
'appengine/docs/python/datastore/'
'entities#Kinds_and_Identifiers')
appcfg_auto_ids_url = ('http://developers.google.com/appengine/docs/'
'python/config/appconfig#auto_id_policy')
logging.warning(
"You have set the datastore auto_id_policy to 'legacy'. It is "
"recommended that you select 'default' instead.\n"
"Legacy auto ids are deprecated. You can continue to allocate\n"
"legacy ids manually using the allocate_ids() API functions.\n"
"For more information see:\n"
+ datastore_auto_ids_url + '\n' + appcfg_auto_ids_url + '\n')
if (hasattr(self, 'beta_settings') and self.beta_settings
and self.beta_settings.get('source_reference')):
ValidateCombinedSourceReferencesString(
self.beta_settings.get('source_reference'))
if self.libraries:
if not (vm_runtime_python27 or self.runtime == 'python27'):
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if self.threadsafe and self.runtime == 'python27':
# VMEngines can handle python25 handlers, so we don't include
# vm_runtime_python27 in the if statement above.
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
if sum([bool(self.automatic_scaling),
bool(self.manual_scaling),
bool(self.basic_scaling)]) > 1:
raise appinfo_errors.TooManyScalingSettingsError(
"There may be only one of 'automatic_scaling', 'manual_scaling', "
"or 'basic_scaling'.")
def GetAllLibraries(self):
"""Returns a list of all `Library` instances active for this configuration.
Returns:
The list of active `Library` instances for this configuration. This
includes directly-specified libraries as well as any required
dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return [Library(**library.ToDict())
for library in self.libraries + required_libraries]
def GetNormalizedLibraries(self):
"""Returns a list of normalized `Library` instances for this configuration.
Returns:
The list of active `Library` instances for this configuration. This
includes directly-specified libraries, their required dependencies, and
any libraries enabled by default. Any libraries with `latest` as their
version will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the `AppInfoExternal`.
Backend entries can contain directives that modify other parts of the
`app.yaml` file, such as the `start` directive, which adds a handler for the
start request. This method performs those modifications.
Args:
backend_name: The name of a backend that is defined in the `backends`
directive.
Raises:
BackendNotFound: If the indicated backend was not listed in the
`backends` directive.
DuplicateBackend: If the backend is found more than once in the `backends`
directive.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
def GetEffectiveRuntime(self):
"""Returns the app's runtime, resolving VMs to the underlying `vm_runtime`.
Returns:
The effective runtime: The value of `beta/vm_settings.vm_runtime` if
`runtime` is `vm`, or `runtime` otherwise.
"""
if (self.runtime == 'vm' and hasattr(self, 'vm_settings')
and self.vm_settings is not None):
return self.vm_settings.get('vm_runtime')
if (self.runtime == 'vm' and hasattr(self, 'beta_settings')
and self.beta_settings is not None):
return self.beta_settings.get('vm_runtime')
return self.runtime
def SetEffectiveRuntime(self, runtime):
"""Sets the runtime while respecting vm runtimes rules for runtime settings.
Args:
runtime: The runtime to use.
"""
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
# Patch up vm runtime setting. Copy `runtime` to `vm_runtime` and set
# runtime to the string `vm`.
self.vm_settings['vm_runtime'] = runtime
self.runtime = 'vm'
else:
self.runtime = runtime
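# Illustrative sketch (not part of the original module): on a VM-based config
# the concrete runtime is stored in vm_settings while `runtime` becomes 'vm';
# GetEffectiveRuntime() reverses the mapping.
#
#   info.SetEffectiveRuntime('python27')   # assuming info.IsVm() is True
#   info.runtime                           # -> 'vm'
#   info.vm_settings['vm_runtime']         # -> 'python27'
#   info.GetEffectiveRuntime()             # -> 'python27'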
def NormalizeVmSettings(self):
"""Normalizes VM settings."""
# NOTE(user): In the input files, `vm` is not a type of runtime, but
# rather is specified as `vm: true|false`. In the code, `vm` is represented
# as a value of `AppInfoExternal.runtime`.
# NOTE(user): This hack is only being applied after the parsing of
# `AppInfoExternal`. If the `vm` attribute can ever be specified in the
# `AppInclude`, then this processing will need to be done there too.
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
if 'vm_runtime' not in self.vm_settings:
self.SetEffectiveRuntime(self.runtime)
# Copy fields that are automatically added by the SDK or this class
# to `beta_settings`.
if hasattr(self, 'beta_settings') and self.beta_settings:
# Only copy if `beta_settings` already exists, because we have logic in
# `appversion.py` to discard all of `vm_settings` if anything is in
# `beta_settings`. So we won't create an empty one just to add these
# fields.
for field in ['vm_runtime',
'has_docker_image',
'image',
'module_yaml_path']:
if field not in self.beta_settings and field in self.vm_settings:
self.beta_settings[field] = self.vm_settings[field]
# TODO(user): `env` replaces `vm`. Remove `vm` when field is removed.
def IsVm(self):
return (self.vm or
self.env in ['2', 'flex', 'flexible'])
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (`URLMap`) objects.
Args:
handlers: A list of a handler (`URLMap`) objects.
is_include_file: If this argument is set to `True`, the handlers that are
added as part of the `includes` directive are validated.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Loads a single `AppInfo` object where one and only one is expected.
This method validates that the values in the `AppInfo` match the
validators that are defined in this file, in particular,
`AppInfoExternal.ATTRIBUTES`.
Args:
app_info: A file-like object or string. If the argument is a string, the
argument is parsed as a configuration file. If the argument is a
file-like object, the data is read, then parsed.
Returns:
An instance of `AppInfoExternal` as loaded from a YAML file.
Raises:
ValueError: If a specified service is not valid.
EmptyConfigurationFile: If there are no documents in YAML file.
MultipleConfigurationFile: If more than one document exists in the YAML
file.
DuplicateBackend: If a backend is found more than once in the `backends`
directive.
yaml_errors.EventError: If the `app.yaml` file fails validation.
appinfo_errors.MultipleProjectNames: If the `app.yaml` file has both an
`application` directive and a `project` directive.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
# Allow `project: name` as an alias for `application: name`. If found, we
# change the `project` field to `None`. (Deleting it would make a distinction
# between loaded and constructed `AppInfoExternal` objects, since the latter
# would still have the project field.)
if appyaml.application and appyaml.project:
raise appinfo_errors.MultipleProjectNames(
'Specify one of "application: name" or "project: name"')
elif appyaml.project:
appyaml.application = appyaml.project
appyaml.project = None
appyaml.NormalizeVmSettings()
return appyaml
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
This class is used to pass back information about the newly created app to
users after a new version has been created.
"""
# NOTE(user): Before you consider adding anything to this YAML definition,
# you must solve the issue that old SDK versions will try to parse this new
# value with the old definition and fail. Basically we are stuck with this
# definition for the time being. The parsing of the value is done in
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: MODULE_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Loads a single `AppInclude` object where one and only one is expected.
Args:
app_include: A file-like object or string. If the argument is a string, it
is parsed as a configuration file. If the argument is a file-like object,
the data is read and then parsed.
Returns:
An instance of `AppInclude` as loaded from a YAML file.
Raises:
EmptyConfigurationFile: If there are no documents in the YAML file.
MultipleConfigurationFile: If there is more than one document in the YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches `_DELTA_REGEX`.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
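# Illustrative sketch (not part of the original module), assuming the usual
# unit conversions (d=86400, h=3600, m=60, s=1) in _EXPIRATION_CONVERSIONS:
#
#   ParseExpiration('4d 5h')   # -> 4*86400 + 5*3600 = 363600 seconds
#   ParseExpiration('5m 30')   # -> 5*60 + 30 = 330 seconds (bare numbers are seconds)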
#####################################################################
# These regexps must be the same as those in apphosting/client/app_config.cc
# and java/com/google/appengine/tools/admin/AppVersionUpload.java
# java/com/google/apphosting/admin/legacy/LegacyAppInfo.java,
# apphosting/client/app_config_old.cc,
# apphosting/api/app_config/app_config_server2.cc
# Valid characters for a file name.
_file_path_positive_re = re.compile(r'^.{1,256}$')
# Forbid `.`, `..`, and leading `-`, `_ah/` or `/`
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/|^/')
# Forbid `//` and trailing `/`
_file_path_negative_2_re = re.compile(r'//|/$')
# Forbid any use of space other than in the middle of a directory or file
# name.
_file_path_negative_3_re = re.compile(r'^ | $|/ | /')
# (erinjerison) Lint seems to think I'm specifying the word "character" as an
# argument. This isn't the case; it's part of a list to enable the list to
# build properly. Disabling it for now.
# pylint: disable=g-doc-args
def ValidFilename(filename):
"""Determines if a file name is valid.
Args:
filename: The file name to validate. The file name must be a valid file
name:
- It must only contain letters, numbers, and the following special
characters: `@`, `_`, `+`, `/`, `$`, `.`, `-`, or `~`.
- It must be less than 256 characters.
- It must not contain `/./`, `/../`, or `//`.
- It must not end in `/`.
- All spaces must be in the middle of a directory or file name.
Returns:
An error string if the file name is invalid. `''` is returned if the file
name is valid.
"""
if _file_path_positive_re.match(filename) is None:
return 'Invalid character in filename: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
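# Illustrative sketch (not part of the original module):
#
#   ValidFilename('static/img/logo.png')   # -> ''   (valid)
#   ValidFilename('../secrets.txt')        # -> error: contains ".."
#   ValidFilename('css//site.css')         # -> error: contains "//"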
|
apache-2.0
|
nickgentoo/scikit-learn-graph
|
scripts/CMS_cross_validation_trees.py
|
1
|
5994
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '',''))
import numpy as np
#from skgraph import datasets
from sklearn import svm
#from skgraph.ioskgraph import *
from math import sqrt
import sys
from sklearn.metrics import roc_auc_score
from sklearn.svm import LinearSVC
from sklearn import linear_model
from skgraph.utils.countminsketch import CountMinSketch
from skgraph.datasets.load_tree_datasets import dispatch
import networkx
import matplotlib.pyplot as plt
import multiprocessing
from skgraph.feature_extraction.graph.ODDSTVectorizer import ODDSTVectorizer
from skgraph.feature_extraction.graph.NSPDK.NSPDKVectorizer import NSPDKVectorizer
from skgraph.feature_extraction.graph.WLVectorizer import WLVectorizer
#"sys.path.append('..\\..\\Multiple Kernel Learning\\Framework')"
if len(sys.argv)<7:
sys.exit("python CMS_cross_validation_trees.py dataset kernel C outfile m d [class_weight:auto]")
kernel=sys.argv[2]
c=float(sys.argv[3])
m=int(sys.argv[5])
d=int(sys.argv[6])
auto_weight=False
if len(sys.argv)==8:
if sys.argv[7]=="auto":
auto_weight=True
##TODO read from libsvm format
max_radius=3
la=1  # lambda for the ODDST/NSPDK vectorizers; assumed default, since the script never reads it from the command line
normalization=False
def generateCMS(featuresCMS,ex,i):
exCMS = CountMinSketch(m, d)
# W=csr_matrix(ex)
rows, cols = ex.nonzero()
# dot=0.0
for row, col in zip(rows, cols):
# ((row,col), ex[row,col])
value = ex[row, col]
# print col, ex[row,col]
# dot+=WCMS[col]*ex[row,col]
exCMS.add(col, value)
# print dot
# TODO: add a bias term
featuresCMS[i]=exCMS.asarray()
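# Rough sketch of the hashing step (not part of the original script; the exact
# layout of CountMinSketch.asarray() is an assumption): each non-zero feature
# column of the sparse example row is folded into an m x d Count-Min sketch, so
# the explicit high-dimensional feature vector is replaced by a fixed-size array.
#
#   cms = CountMinSketch(m, d)
#   cms.add(col, value)      # for every non-zero (row, col) of the example
#   hashed = cms.asarray()   # fixed-size representation stored in featuresCMS[i]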
g_it=dispatch(sys.argv[1])
# graph=g_it.graphs[0]
# node_positions = networkx.spring_layout(graph)
# networkx.draw_networkx_nodes(graph, node_positions, with_labels = True)
# networkx.draw_networkx_edges(graph, node_positions, style='dashed')
# plt.show()
if kernel == "WL":
print "Lambda ignored"
print "Using WL fast subtree kernel"
Vectorizer = WLVectorizer(r=max_radius, normalization=normalization)
elif kernel == "ODDST":
print "Using ST kernel"
Vectorizer = ODDSTVectorizer(r=max_radius, l=la, normalization=normalization)
elif kernel == "NSPDK":
print "Using NSPDK kernel, lambda parameter interpreted as d"
Vectorizer = NSPDKVectorizer(r=max_radius, d=int(la), normalization=normalization)
else:
print "Unrecognized kernel"
features = Vectorizer.transform(g_it.graphs)
target_array=np.array(g_it.target)
#features, target_array =
#print km
print "original shape", features.shape
print "features loaded, hashing..."
featuresCMS=[0]*features.shape[0]
for i in xrange(features.shape[0]):
generateCMS(featuresCMS,features[i][0],i)
#pool = multiprocessing.Pool(processes=4)
#pool.map(generateCMS, features[i][0],i)
# pool.close()
# pool.join()
# exCMS=CountMinSketch(m,d)
#
# ex=features[i][0]
# #W=csr_matrix(ex)
#
# rows,cols = ex.nonzero()
# #dot=0.0
# for row,col in zip(rows,cols):
# #((row,col), ex[row,col])
# value=ex[row,col]
# #print col, ex[row,col]
# #dot+=WCMS[col]*ex[row,col]
# exCMS.add(col,value)
# #print dot
# #TODO: add a bias term
# featuresCMS.append(exCMS.asarray())
print "hashing done"
features=np.matrix(featuresCMS)
print features.shape
print features[i].shape
from sklearn import cross_validation
for rs in range(42,53):
f=open(str(sys.argv[4]+".seed"+str(rs)+".c"+str(c)),'w')
kf = cross_validation.StratifiedKFold(target_array, n_folds=10, shuffle=True,random_state=rs)
#print kf
#remove column zero because
#first entry of each line is the index
#gram=km[:,1:].todense()
f.write("Total examples "+str(features.shape[0])+"\n")
f.write("CV\t test_AUROC\n")
sc=[]
for train_index, test_index in kf:
#print("TRAIN:", train_index, "TEST:", test_index)
#generated train and test lists, including indices of the examples in training/test
#for the specific fold. Indices starts from 0 now
if auto_weight==False:
clf = svm.LinearSVC(C=c,dual=True) #, class_weight='auto'
else:
print "Class weights automatically assigned from training data"
clf = svm.LinearSVC(C=c,dual=True, class_weight='auto')
#clf = svm.SVC(C=c,probability=True, class_weight='auto',kernel='linear') #,probability=True,
#clf = linear_model.LogisticRegression(C=c, dual=True, class_weight='auto')#, solver='liblinear'
#generate train features and test features
X_train, X_test, y_train, y_test = features[train_index], features[test_index], target_array[train_index], target_array[test_index]
#COMPUTE INNERKFOLD
inner_kf = cross_validation.StratifiedKFold(y_train, n_folds=10, shuffle=True,random_state=rs)
inner_scores= cross_validation.cross_val_score(
clf, X_train, y_train, cv=inner_kf, scoring='roc_auc')
#print "inner scores", inner_scores
print "Inner AUROC: %0.4f (+/- %0.4f)" % (inner_scores.mean(), inner_scores.std() / 2)
f.write(str(inner_scores.mean())+"\t")
clf.fit(X_train, y_train)
# predict on test examples
#LibLinear does not support multiclass
y_test_predicted=clf.decision_function(X_test)
#y_test_predicted=clf.predict_proba(X_test)
# #print y_test_predicted
# sc.append(roc_auc_score(y_test, y_test_predicted[:,1]))
# f.write(str(roc_auc_score(y_test, y_test_predicted[:,1]))+"\n")
#LibLinear does not support multiclass
#print y_test_predicted
sc.append(roc_auc_score(y_test, y_test_predicted))
f.write(str(roc_auc_score(y_test, y_test_predicted))+"\n")
f.close()
scores=np.array(sc)
print "AUROC: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() / 2)
|
gpl-3.0
|
dkushner/zipline
|
tests/modelling/test_frameload.py
|
11
|
6900
|
"""
Tests for zipline.data.ffc.frame.DataFrameFFCLoader
"""
from unittest import TestCase
from mock import patch
from numpy import arange
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
DatetimeIndex,
Int64Index,
)
from zipline.lib.adjustment import (
Float64Add,
Float64Multiply,
Float64Overwrite,
)
from zipline.data.equities import USEquityPricing
from zipline.data.ffc.frame import (
ADD,
DataFrameFFCLoader,
MULTIPLY,
OVERWRITE,
)
from zipline.utils.tradingcalendar import trading_day
class DataFrameFFCLoaderTestCase(TestCase):
def setUp(self):
self.nsids = 5
self.ndates = 20
self.sids = Int64Index(range(self.nsids))
self.dates = DatetimeIndex(
start='2014-01-02',
freq=trading_day,
periods=self.ndates,
)
self.mask = DataFrame(
True,
index=self.dates,
columns=self.sids,
dtype=bool,
)
def tearDown(self):
pass
def test_bad_input(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameFFCLoader(
USEquityPricing.close,
baseline,
)
with self.assertRaises(ValueError):
# Wrong column.
loader.load_adjusted_array([USEquityPricing.open], self.mask)
with self.assertRaises(ValueError):
# Too many columns.
loader.load_adjusted_array(
[USEquityPricing.open, USEquityPricing.close],
self.mask
)
def test_baseline(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameFFCLoader(
USEquityPricing.close,
baseline,
)
dates_slice = slice(None, 10, None)
sids_slice = slice(1, 3, None)
[adj_array] = loader.load_adjusted_array(
[USEquityPricing.close],
self.mask.iloc[dates_slice, sids_slice]
)
for idx, window in enumerate(adj_array.traverse(window_length=3)):
expected = baseline.values[dates_slice, sids_slice][idx:idx + 3]
assert_array_equal(window, expected)
def test_adjustments(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
# Use the dates from index 10 on and sids 1-3.
dates_slice = slice(10, None, None)
sids_slice = slice(1, 4, None)
# Adjustments that should actually affect the output.
relevant_adjustments = [
{
'sid': 1,
'start_date': None,
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 0.5,
'kind': MULTIPLY,
},
{
'sid': 2,
'start_date': self.dates[5],
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 1.0,
'kind': ADD,
},
{
'sid': 2,
'start_date': self.dates[15],
'end_date': self.dates[16],
'apply_date': self.dates[17],
'value': 1.0,
'kind': ADD,
},
{
'sid': 3,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': 99.0,
'kind': OVERWRITE,
},
]
# These adjustments shouldn't affect the output.
irrelevant_adjustments = [
{ # Sid Not Requested
'sid': 0,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Sid Unknown
'sid': 9999,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Not Requested
'sid': 2,
'start_date': self.dates[1],
'end_date': self.dates[2],
'apply_date': self.dates[3],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Before Known Data
'sid': 2,
'start_date': self.dates[0] - (2 * trading_day),
'end_date': self.dates[0] - trading_day,
'apply_date': self.dates[0] - trading_day,
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date After Known Data
'sid': 2,
'start_date': self.dates[-1] + trading_day,
'end_date': self.dates[-1] + (2 * trading_day),
'apply_date': self.dates[-1] + (3 * trading_day),
'value': -9999.0,
'kind': OVERWRITE,
},
]
adjustments = DataFrame(relevant_adjustments + irrelevant_adjustments)
loader = DataFrameFFCLoader(
USEquityPricing.close,
baseline,
adjustments=adjustments,
)
expected_baseline = baseline.iloc[dates_slice, sids_slice]
formatted_adjustments = loader.format_adjustments(
self.dates[dates_slice],
self.sids[sids_slice],
)
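        # The keys below are row offsets into the sliced dates (dates[10:]):
        # apply_date indices 16, 17 and 18 become 6, 7 and 8. Sids 1-3 map to
        # columns 0-2, and first_row/last_row are the adjustment start/end
        # dates shifted by the same 10-row offset.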
expected_formatted_adjustments = {
6: [
Float64Multiply(first_row=0, last_row=5, col=0, value=0.5),
Float64Add(first_row=0, last_row=5, col=1, value=1.0),
],
7: [
Float64Add(first_row=5, last_row=6, col=1, value=1.0),
],
8: [
Float64Overwrite(first_row=6, last_row=7, col=2, value=99.0)
],
}
self.assertEqual(formatted_adjustments, expected_formatted_adjustments)
mask = self.mask.iloc[dates_slice, sids_slice]
with patch('zipline.data.ffc.frame.adjusted_array') as m:
loader.load_adjusted_array(
columns=[USEquityPricing.close],
mask=mask,
)
self.assertEqual(m.call_count, 1)
args, kwargs = m.call_args
assert_array_equal(kwargs['data'], expected_baseline.values)
assert_array_equal(kwargs['mask'], mask.values)
self.assertEqual(kwargs['adjustments'], expected_formatted_adjustments)
|
apache-2.0
|
Mistobaan/tensorflow
|
tensorflow/python/estimator/inputs/queues/feeding_functions.py
|
10
|
18972
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _fill_array(arr, seq, fillvalue=0):
"""
Recursively fills padded arr with elements from seq.
  If `seq` is shorter than the padded length of `arr`, the remainder of `arr`
  is filled with `fillvalue`.
Args:
arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
    seq: Non-padded list of data samples of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = fillvalue
else:
for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()):
_fill_array(subarr, subseq, fillvalue)
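# Hedged usage sketch (not part of the library): _fill_array copies a ragged
# batch into a pre-allocated padded array and fills the tail of short rows
# with `fillvalue`. The shapes and values below are illustrative only.
def _demo_fill_array():
  padded = np.zeros((2, 4))
  ragged = [[1, 2, 3], [4]]
  _fill_array(padded, ragged, fillvalue=0)
  return padded  # array([[1., 2., 3., 0.], [4., 0., 0., 0.]])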
def _pad_if_needed(batch_key_item, fillvalue=0):
""" Returns padded batch.
Args:
batch_key_item: List of data samples of any type with shape
[batch_size, ..., padded_dim(None)].
fillvalue: Default fillvalue to use.
Returns:
    Tensor of the same type as the input, padded with `fillvalue` up to shape
    [batch_size, ..., max_padded_dim_len].
Raises:
ValueError if data samples have different shapes (except last padded dim).
"""
shapes = [seq.shape[:-1] if len(seq.shape) > 0 else -1
for seq in batch_key_item]
if not all(shapes[0] == x for x in shapes):
raise ValueError("Array shapes must match.")
last_length = [seq.shape[-1] if len(seq.shape) > 0 else 0
for seq in batch_key_item]
if all([x == last_length[0] for x in last_length]):
return batch_key_item
batch_size = len(batch_key_item)
max_sequence_length = max(last_length)
result_batch = np.zeros(
shape=[batch_size] + list(shapes[0]) + [max_sequence_length],
dtype=batch_key_item[0].dtype)
_fill_array(result_batch, batch_key_item, fillvalue)
return result_batch
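# Hedged usage sketch (not part of the library): rows that agree on every
# dimension except the last are padded up to the longest last dimension; the
# values below are illustrative only.
def _demo_pad_if_needed():
  batch = [np.array([1, 2, 3]), np.array([4])]
  return _pad_if_needed(batch, fillvalue=0)  # array([[1, 2, 3], [4, 0, 0]])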
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
    current_epoch: Integer, the number of epochs that have been emitted so far.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
  # Now we might have emitted more data than the requested number of epochs.
  # Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
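# Hedged usage sketch (not called by the library): with an array of length 5,
# a batch of size 4 starting at index 3 wraps around the end of the array, and
# the epoch counter advances when the epoch-end index (4 here) is crossed.
def _demo_next_batch_indices():
  indices, epoch = _get_integer_indices_for_next_batch(
      batch_indices_start=3, batch_size=4, epoch_end=4, array_length=5,
      current_epoch=0, total_epochs=None)
  return indices, epoch  # ([3, 4, 0, 1], 1)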
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None,
pad_value=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
self._pad_value = pad_value
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun "
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
if self._pad_value is not None:
feed_dict = {key: np.asarray(_pad_if_needed(item, self._pad_value))
for key, item in list(list_dict.items())}
else:
feed_dict = {key: np.asarray(item)
for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None,
pad_value=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
pad_value: default value for dynamic padding of data samples, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
NotImplementedError: padding and shuffling data at the same time.
NotImplementedError: padding usage with non generator data type.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
pad_data = pad_value is not None
if pad_data and get_feed_fn is not _GeneratorFeedFn:
raise NotImplementedError(
"padding is only available with generator usage")
if shuffle and pad_data:
raise NotImplementedError(
"padding and shuffling data at the same time is not implemented")
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
elif pad_data:
min_after_dequeue = 0 # just for the summary text
queue_shapes = list(map(
lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
queue_shapes))
queue = data_flow_ops.PaddingFIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
if not pad_data:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
else:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs,
pad_value=pad_value))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
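# Hedged usage sketch for _enqueue_data (illustrative only; assumes the usual
# TF1 graph/session workflow with queue runners started elsewhere). The first
# dequeued tensor holds row indices, the second holds the corresponding rows.
def _demo_enqueue_numpy_array():
  data = np.arange(12).reshape(4, 3)
  queue = _enqueue_data(data, capacity=8, shuffle=False, enqueue_size=2)
  index_tensor, row_tensor = queue.dequeue_many(2)
  return index_tensor, row_tensor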
|
apache-2.0
|
PatrickOReilly/scikit-learn
|
benchmarks/bench_rcv1_logreg_convergence.py
|
149
|
7173
|
# Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
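# Hedged restatement of the quantity above: get_loss returns the scaled primal
# objective of L2-regularised logistic regression,
#   mean_i log(1 + exp(-y_i * (x_i . w + intercept))) + ||w||^2 / (2 * C * n_samples).
# Tiny illustrative check, not run by the benchmark:
def _demo_get_loss():
    myX = np.array([[1.0, 0.0], [0.0, 1.0]])
    myy = np.array([1.0, -1.0])
    return get_loss(np.zeros(2), 0.0, myX, myy, C=1.0)  # log(2) ~ 0.6931 for w = 0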
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
    # compute the same step_size as in LR-SAG
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
|
bsd-3-clause
|
vitan/blaze
|
blaze/tests/test_mysql_into.py
|
1
|
5591
|
from __future__ import absolute_import, division, print_function
import pytest
pymysql = pytest.importorskip('pymysql')
import subprocess
ps = subprocess.Popen("ps aux | grep '[m]ysql'",shell=True, stdout=subprocess.PIPE)
output = ps.stdout.read()
num_processes = len(output.splitlines())
pytestmark = pytest.mark.skipif(num_processes < 3, reason="No MySQL Installation")
from blaze import SQL
from blaze import CSV
from blaze.api.into import into
import sqlalchemy
import os
import csv as csv_module
import pandas as pd
import datetime as dt
import numpy as np
import getpass
username = getpass.getuser()
url = 'mysql+pymysql://{0}@localhost:3306/test'.format(username)
file_name = 'test.csv'
file_name_floats = 'test_floats.csv'
def create_csv(data, file_name):
with open(file_name, 'w') as f:
csv_writer = csv_module.writer(f)
for row in data:
csv_writer.writerow(row)
def setup_function(function):
data = [(1, 2), (10, 20), (100, 200)]
data_floats = [(1.02, 2.02), (102.02, 202.02), (1002.02, 2002.02)]
create_csv(data,file_name)
create_csv(data_floats,file_name_floats)
def teardown_function(function):
os.remove(file_name)
os.remove(file_name_floats)
engine = sqlalchemy.create_engine(url)
metadata = sqlalchemy.MetaData()
metadata.reflect(engine)
for t in metadata.tables:
if 'testtable' in t:
# pass
metadata.tables[t].drop(engine)
def test_csv_mysql_load():
tbl = 'testtable'
engine = sqlalchemy.create_engine(url)
if engine.has_table(tbl):
metadata = sqlalchemy.MetaData()
metadata.reflect(engine)
t = metadata.tables[tbl]
t.drop(engine)
csv = CSV(file_name)
sql = SQL(url,tbl, schema=csv.schema)
engine = sql.engine
conn = engine.raw_connection()
cursor = conn.cursor()
full_path = os.path.abspath(file_name)
load = '''LOAD DATA INFILE '{0}' INTO TABLE {1} FIELDS TERMINATED BY ','
lines terminated by '\n'
'''.format(full_path, tbl)
cursor.execute(load)
conn.commit()
def test_simple_into():
tbl = 'testtable_into_2'
csv = CSV(file_name, columns=['a', 'b'])
sql = SQL(url,tbl, schema= csv.schema)
into(sql,csv, if_exists="replace")
assert list(sql[:, 'a']) == [1, 10, 100]
assert list(sql[:, 'b']) == [2, 20, 200]
def test_append():
tbl = 'testtable_into_append'
csv = CSV(file_name, columns=['a', 'b'])
sql = SQL(url,tbl, schema= csv.schema)
into(sql,csv, if_exists="replace")
assert list(sql[:, 'a']) == [1, 10, 100]
assert list(sql[:, 'b']) == [2, 20, 200]
into(sql,csv, if_exists="append")
assert list(sql[:, 'a']) == [1, 10, 100, 1, 10, 100]
assert list(sql[:, 'b']) == [2, 20, 200, 2, 20, 200]
def test_simple_float_into():
tbl = 'testtable_into_float'
csv = CSV(file_name_floats, columns=['a', 'b'])
sql = SQL(url,tbl, schema= csv.schema)
into(sql,csv, if_exists="replace")
assert list(sql[:, 'a']) == [1.02, 102.02, 1002.02]
assert list(sql[:, 'b']) == [2.02, 202.02, 2002.02]
def test_tryexcept_into():
tbl = 'testtable_into_2'
csv = CSV(file_name, columns=['a', 'b'])
sql = SQL(url,tbl, schema= csv.schema)
into(sql,csv, if_exists="replace", QUOTE="alpha", FORMAT="csv") # uses multi-byte character and
# fails over to using sql.extend()
assert list(sql[:, 'a']) == [1, 10, 100]
assert list(sql[:, 'b']) == [2, 20, 200]
@pytest.mark.xfail(raises=KeyError)
def test_failing_argument():
tbl = 'testtable_into_2'
csv = CSV(file_name, columns=['a', 'b'])
sql = SQL(url,tbl, schema= csv.schema)
into(sql,csv, if_exists="replace", skipinitialspace="alpha") # failing call
def test_no_header_no_columns():
tbl = 'testtable_into_2'
csv = CSV(file_name)
sql = SQL(url,tbl, schema= '{x: int, y: int}')
into(sql,csv, if_exists="replace")
assert list(sql[:, 'x']) == [1, 10, 100]
assert list(sql[:, 'y']) == [2, 20, 200]
def test_complex_into():
# data from: http://dummydata.me/generate
this_dir = os.path.dirname(__file__)
file_name = os.path.join(this_dir, 'dummydata.csv')
tbl = 'testtable_into_complex'
csv = CSV(file_name, schema='{Name: string, RegistrationDate: date, ZipCode: int64, Consts: float64}')
sql = SQL(url,tbl, schema=csv.schema)
into(sql,csv, if_exists="replace")
df = pd.read_csv(file_name, parse_dates=['RegistrationDate'])
assert sql[0] == csv[0]
#implement count method
print(len(list(sql[:])))
# assert sql[] == csv[-1]
for col in sql.columns:
#need to convert to python datetime
if col == "RegistrationDate":
py_dates = list(df['RegistrationDate'].astype(object).values)
py_dates = [dt.date(d.year, d.month, d.day) for d in py_dates]
assert list(sql[:,col]) == list(csv[:,col]) == py_dates
#handle floating point precision -- perhaps it's better to call out to assert_array_almost_equal
elif col == 'Consts':
## WARNING!!! Floats are truncated with MySQL and the assertion fails
sql_array = np.array(list(sql[:,col]))
csv_array = list(csv[:,col])
df_array = df[col].values
np.testing.assert_almost_equal(sql_array,csv_array, decimal=5)
np.testing.assert_almost_equal(sql_array,df_array, decimal=5)
else:
assert list(sql[:,col]) == list(csv[:,col]) == list(df[col].values)
#
|
bsd-3-clause
|
nliolios24/textrank
|
share/doc/networkx-1.9.1/examples/graph/napoleon_russian_campaign.py
|
44
|
3216
|
#!/usr/bin/env python
"""
Minard's data from Napoleon's 1812-1813 Russian Campaign.
http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import string
import networkx as nx
def minard_graph():
data1="""\
24.0,54.9,340000,A,1
24.5,55.0,340000,A,1
25.5,54.5,340000,A,1
26.0,54.7,320000,A,1
27.0,54.8,300000,A,1
28.0,54.9,280000,A,1
28.5,55.0,240000,A,1
29.0,55.1,210000,A,1
30.0,55.2,180000,A,1
30.3,55.3,175000,A,1
32.0,54.8,145000,A,1
33.2,54.9,140000,A,1
34.4,55.5,127100,A,1
35.5,55.4,100000,A,1
36.0,55.5,100000,A,1
37.6,55.8,100000,A,1
37.7,55.7,100000,R,1
37.5,55.7,98000,R,1
37.0,55.0,97000,R,1
36.8,55.0,96000,R,1
35.4,55.3,87000,R,1
34.3,55.2,55000,R,1
33.3,54.8,37000,R,1
32.0,54.6,24000,R,1
30.4,54.4,20000,R,1
29.2,54.3,20000,R,1
28.5,54.2,20000,R,1
28.3,54.3,20000,R,1
27.5,54.5,20000,R,1
26.8,54.3,12000,R,1
26.4,54.4,14000,R,1
25.0,54.4,8000,R,1
24.4,54.4,4000,R,1
24.2,54.4,4000,R,1
24.1,54.4,4000,R,1"""
data2="""\
24.0,55.1,60000,A,2
24.5,55.2,60000,A,2
25.5,54.7,60000,A,2
26.6,55.7,40000,A,2
27.4,55.6,33000,A,2
28.7,55.5,33000,R,2
29.2,54.2,30000,R,2
28.5,54.1,30000,R,2
28.3,54.2,28000,R,2"""
data3="""\
24.0,55.2,22000,A,3
24.5,55.3,22000,A,3
24.6,55.8,6000,A,3
24.6,55.8,6000,R,3
24.2,54.4,6000,R,3
24.1,54.4,6000,R,3"""
cities="""\
24.0,55.0,Kowno
25.3,54.7,Wilna
26.4,54.4,Smorgoni
26.8,54.3,Moiodexno
27.7,55.2,Gloubokoe
27.6,53.9,Minsk
28.5,54.3,Studienska
28.7,55.5,Polotzk
29.2,54.4,Bobr
30.2,55.3,Witebsk
30.4,54.5,Orscha
30.4,53.9,Mohilow
32.0,54.8,Smolensk
33.2,54.9,Dorogobouge
34.3,55.2,Wixma
34.4,55.5,Chjat
36.0,55.5,Mojaisk
37.6,55.8,Moscou
36.6,55.3,Tarantino
36.5,55.0,Malo-Jarosewii"""
c={}
for line in cities.split('\n'):
x,y,name=line.split(',')
c[name]=(float(x),float(y))
g=[]
for data in [data1,data2,data3]:
G=nx.Graph()
i=0
G.pos={} # location
G.pop={} # size
last=None
for line in data.split('\n'):
x,y,p,r,n=line.split(',')
G.pos[i]=(float(x),float(y))
G.pop[i]=int(p)
if last is None:
last=i
else:
G.add_edge(i,last,{r:int(n)})
last=i
i=i+1
g.append(G)
return g,c
if __name__ == "__main__":
(g,city)=minard_graph()
try:
import matplotlib.pyplot as plt
plt.figure(1,figsize=(11,5))
plt.clf()
colors=['b','g','r']
for G in g:
c=colors.pop(0)
node_size=[int(G.pop[n]/300.0) for n in G]
nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k')
for c in city:
x,y=city[c]
plt.text(x,y+0.1,c)
plt.savefig("napoleon_russian_campaign.png")
except ImportError:
pass
|
mit
|
BorisJeremic/Real-ESSI-Examples
|
analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_NonLinHardShear/Shear_Zone_Length/SZ_h_1/Normal_Stress_Plot.py
|
72
|
2800
|
#!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
|
cc0-1.0
|
maxalbert/blaze
|
blaze/compute/tests/test_csv_compute.py
|
13
|
4310
|
from blaze.compute.csv import pre_compute, CSV
from blaze import compute, discover, dshape, into, resource, join, concat
from blaze.utils import example, filetext, filetexts
from blaze.expr import symbol
from pandas import DataFrame, Series
import pandas.util.testing as tm
from datashape.predicates import iscollection
import numpy as np
import pandas as pd
from toolz import first
from collections import Iterator
from odo import odo
from odo.chunks import chunks
def test_pre_compute_on_small_csv_gives_dataframe():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
assert isinstance(pre_compute(s.species, csv), (Series, DataFrame))
def test_pre_compute_on_large_csv_gives_chunked_reader():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
assert isinstance(pre_compute(s.species, csv, comfortable_memory=10),
(chunks(pd.DataFrame), pd.io.parsers.TextFileReader))
def test_pre_compute_with_head_on_large_csv_yields_iterator():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
assert isinstance(pre_compute(s.species.head(), csv, comfortable_memory=10),
Iterator)
def test_compute_chunks_on_single_csv():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
expr = s.sepal_length.max()
assert compute(expr, {s: csv}, comfortable_memory=10, chunksize=50) == 7.9
def test_pre_compute_with_projection_projects_on_data_frames():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
result = pre_compute(s[['sepal_length', 'sepal_width']].distinct(),
csv, comfortable_memory=10)
assert set(first(result).columns) == \
set(['sepal_length', 'sepal_width'])
def test_pre_compute_calls_lean_projection():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
result = pre_compute(s.sort('sepal_length').species,
csv, comfortable_memory=10)
assert set(first(result).columns) == \
set(['sepal_length', 'species'])
def test_unused_datetime_columns():
ds = dshape('2 * {val: string, when: datetime}')
with filetext("val,when\na,2000-01-01\nb,2000-02-02") as fn:
csv = CSV(fn, has_header=True)
s = symbol('s', discover(csv))
assert into(list, compute(s.val, csv)) == ['a', 'b']
def test_multiple_csv_files():
d = {'mult1.csv': 'name,val\nAlice,1\nBob,2',
'mult2.csv': 'name,val\nAlice,3\nCharlie,4'}
data = [('Alice', 1), ('Bob', 2), ('Alice', 3), ('Charlie', 4)]
with filetexts(d) as fns:
r = resource('mult*.csv')
s = symbol('s', discover(r))
for e in [s, s.name, s.name.nunique(), s.name.count_values(),
s.val.mean()]:
a = compute(e, {s: r})
b = compute(e, {s: data})
if iscollection(e.dshape):
a, b = into(set, a), into(set, b)
assert a == b
def test_csv_join():
d = {'a.csv': 'a,b,c\n0,1,2\n3,4,5',
'b.csv': 'c,d,e\n2,3,4\n5,6,7'}
with filetexts(d):
resource_a = resource('a.csv')
resource_b = resource('b.csv')
a = symbol('a', discover(resource_a))
b = symbol('b', discover(resource_b))
tm.assert_frame_equal(
odo(
compute(join(a, b, 'c'), {a: resource_a, b: resource_b}),
pd.DataFrame,
),
# windows needs explicit int64 construction b/c default is int32
pd.DataFrame(np.array([[2, 0, 1, 3, 4],
[5, 3, 4, 6, 7]], dtype='int64'),
columns=list('cabde'))
)
def test_concat():
d = {'a.csv': 'a,b\n1,2\n3,4',
'b.csv': 'a,b\n5,6\n7,8'}
with filetexts(d):
a_rsc = resource('a.csv')
b_rsc = resource('b.csv')
a = symbol('a', discover(a_rsc))
b = symbol('b', discover(b_rsc))
tm.assert_frame_equal(
odo(
compute(concat(a, b), {a: a_rsc, b: b_rsc}), pd.DataFrame,
),
# windows needs explicit int64 construction b/c default is int32
pd.DataFrame(np.arange(1, 9, dtype='int64').reshape(4, 2),
columns=list('ab')),
)
|
bsd-3-clause
|
cython-testbed/pandas
|
pandas/tests/io/parser/skiprows.py
|
20
|
8200
|
# -*- coding: utf-8 -*-
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.errors import EmptyDataError
from pandas.compat import StringIO, range, lrange
class SkipRowsTests(object):
def test_skiprows_bug(self):
# see gh-505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# see gh-4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# see gh-9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_skiprow_with_newline(self):
# see gh-12775 and gh-10911
data = """id,text,num_lines
1,"line 11
line 12",2
2,"line 21
line 22",2
3,"line 31",1"""
expected = [[2, 'line 21\nline 22', 2],
[3, 'line 31', 1]]
expected = DataFrame(expected, columns=[
'id', 'text', 'num_lines'])
df = self.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(df, expected)
data = ('a,b,c\n~a\n b~,~e\n d~,'
'~f\n f~\n1,2,~12\n 13\n 14~')
expected = [['a\n b', 'e\n d', 'f\n f']]
expected = DataFrame(expected, columns=[
'a', 'b', 'c'])
df = self.read_csv(StringIO(data),
quotechar="~",
skiprows=[2])
tm.assert_frame_equal(df, expected)
data = ('Text,url\n~example\n '
'sentence\n one~,url1\n~'
'example\n sentence\n two~,url2\n~'
'example\n sentence\n three~,url3')
expected = [['example\n sentence\n two', 'url2']]
expected = DataFrame(expected, columns=[
'Text', 'url'])
df = self.read_csv(StringIO(data),
quotechar="~",
skiprows=[1, 3])
tm.assert_frame_equal(df, expected)
def test_skiprow_with_quote(self):
# see gh-12775 and gh-10911
data = """id,text,num_lines
1,"line '11' line 12",2
2,"line '21' line 22",2
3,"line '31' line 32",1"""
expected = [[2, "line '21' line 22", 2],
[3, "line '31' line 32", 1]]
expected = DataFrame(expected, columns=[
'id', 'text', 'num_lines'])
df = self.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(df, expected)
def test_skiprow_with_newline_and_quote(self):
# see gh-12775 and gh-10911
data = """id,text,num_lines
1,"line \n'11' line 12",2
2,"line \n'21' line 22",2
3,"line \n'31' line 32",1"""
expected = [[2, "line \n'21' line 22", 2],
[3, "line \n'31' line 32", 1]]
expected = DataFrame(expected, columns=[
'id', 'text', 'num_lines'])
df = self.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(df, expected)
data = """id,text,num_lines
1,"line '11\n' line 12",2
2,"line '21\n' line 22",2
3,"line '31\n' line 32",1"""
expected = [[2, "line '21\n' line 22", 2],
[3, "line '31\n' line 32", 1]]
expected = DataFrame(expected, columns=[
'id', 'text', 'num_lines'])
df = self.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(df, expected)
data = """id,text,num_lines
1,"line '11\n' \r\tline 12",2
2,"line '21\n' \r\tline 22",2
3,"line '31\n' \r\tline 32",1"""
expected = [[2, "line '21\n' \r\tline 22", 2],
[3, "line '31\n' \r\tline 32", 1]]
expected = DataFrame(expected, columns=[
'id', 'text', 'num_lines'])
df = self.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(df, expected)
def test_skiprows_lineterminator(self):
# see gh-9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with default line terminators "LF" and "CRLF"
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
# "CR" is not respected with the Python parser yet
if self.engine == 'c':
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_skiprows_infield_quote(self):
# see gh-14459
data = 'a"\nb"\na\n1'
expected = DataFrame({'a': [1]})
df = self.read_csv(StringIO(data), skiprows=2)
tm.assert_frame_equal(df, expected)
def test_skiprows_callable(self):
data = 'a\n1\n2\n3\n4\n5'
skiprows = lambda x: x % 2 == 0
expected = DataFrame({'1': [3, 5]})
df = self.read_csv(StringIO(data), skiprows=skiprows)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'foo': [3, 5]})
df = self.read_csv(StringIO(data), skiprows=skiprows,
header=0, names=['foo'])
tm.assert_frame_equal(df, expected)
skiprows = lambda x: True
msg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, msg):
self.read_csv(StringIO(data), skiprows=skiprows)
# This is a bad callable and should raise.
msg = "by zero"
skiprows = lambda x: 1 / 0
with tm.assert_raises_regex(ZeroDivisionError, msg):
self.read_csv(StringIO(data), skiprows=skiprows)
|
bsd-3-clause
|
chanceraine/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4agg.py
|
70
|
4985
|
"""
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt4 import QtCore, QtGui, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_qtagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQTAgg( thisFig )
return FigureManagerQT( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def _get_canvas(self, fig):
return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
def _get_toolbar(self, canvas, parent):
        # must be initialized after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2QTAgg(canvas, parent)
else:
toolbar = None
return toolbar
class FigureCanvasQTAgg( FigureCanvasQT, FigureCanvasAgg ):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQtAgg: ', figure
FigureCanvasQT.__init__( self, figure )
FigureCanvasAgg.__init__( self, figure )
self.drawRect = False
self.rect = []
self.replot = True
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def resizeEvent( self, e ):
FigureCanvasQT.resizeEvent( self, e )
def drawRectangle( self, rect ):
self.rect = rect
self.drawRect = True
self.repaint( )
def paintEvent( self, e ):
"""
Draw to the Agg backend and then copy the image to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
#FigureCanvasQT.paintEvent( self, e )
if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
self.get_width_height()
# only replot data when needed
if type(self.replot) is bool: # might be a bbox for blitting
if self.replot:
FigureCanvasAgg.draw(self)
# matplotlib is in rgba byte order. QImage wants to put the bytes
# into argb format and is in a 4 byte unsigned int. Little endian
# system is LSB first and expects the bytes in reverse order
# (bgra).
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
qImage = QtGui.QImage(stringBuffer, self.renderer.width,
self.renderer.height,
QtGui.QImage.Format_ARGB32)
p = QtGui.QPainter(self)
p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
# draw the zoom rectangle to the QPainter
if self.drawRect:
p.setPen( QtGui.QPen( QtCore.Qt.black, 1, QtCore.Qt.DotLine ) )
p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
p.end()
# we are blitting here
else:
bbox = self.replot
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)
pixmap = QtGui.QPixmap.fromImage(qImage)
p = QtGui.QPainter( self )
p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
p.end()
self.replot = False
self.drawRect = False
def draw( self ):
"""
Draw the figure when xwindows is ready for the update
"""
if DEBUG: print "FigureCanvasQtAgg.draw", self
self.replot = True
FigureCanvasAgg.draw(self)
self.update()
# Added following line to improve realtime pan/zoom on windows:
QtGui.qApp.processEvents()
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.replot = bbox
l, b, w, h = bbox.bounds
t = b + h
self.update(l, self.renderer.height-t, w, h)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
|
agpl-3.0
|
QuLogic/burnman
|
examples/example_premite_isothermal.py
|
1
|
3877
|
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
This example is under construction.
requires:
teaches:
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
#hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
import burnman
import pymc
seismic_model = burnman.seismic.PREM() # pick from .prem() .slow() .fast() (see code/seismic.py)
number_of_points = 20 #set on how many depth slices the computations should be done
depths = np.linspace(750.e3,2890.e3, number_of_points)
seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)
print "preparations done"
def calc_velocities(ref_rho, K_0, K_prime, G_0, G_prime):
rock = burnman.Mineral()
rock.params['V_0'] = 10.e-6
rock.params['molar_mass'] = ref_rho*rock.params['V_0']
rock.params['K_0'] = K_0
rock.params['Kprime_0'] = K_prime
rock.params['G_0'] = G_0
rock.params['Gprime_0'] = G_prime
temperature = np.empty_like(seis_p)
mat_rho, mat_vp, mat_vs, mat_vphi, mat_K, mat_G = burnman.velocities_from_rock(rock,seis_p, temperature)
return mat_rho, mat_vphi, mat_vs
def error(ref_rho, K_0, K_prime, G_0, G_prime):
rho, vphi, vs = calc_velocities(ref_rho, K_0, K_prime, G_0, G_prime)
vphi_chi = burnman.chi_factor(vphi, seis_vphi)
vs_chi = burnman.chi_factor(vs, seis_vs)
rho_chi = burnman.chi_factor(rho, seis_rho)
return rho_chi+vphi_chi+vs_chi
if __name__ == "__main__":
# Priors on unknown parameters:
ref_rho = pymc.Uniform('ref_rho', lower=3300., upper=4500.)
K_0 = pymc.Uniform('K_0', lower=200.e9, upper=300.e9)
K_prime = pymc.Uniform('Kprime_0', lower=3., upper=6.)
G_0 = pymc.Uniform('G_0', lower=50.e9, upper=250.e9)
G_prime = pymc.Uniform('Gprime_0', lower=0., upper=3.)
minerr = 1e100
@pymc.deterministic
def theta(p1=ref_rho,p2=K_0,p3=K_prime,p4=G_0,p5=G_prime):
global minerr
if (p1<0 or p2<0 or p3<0 or p4<0 or p5 < 0):
return 1e30
try:
e = error(p1,p2,p3,p4,p5)
if (e<minerr):
minerr=e
print "best fit", e, "values:", p1,p2/1.e9,p3,p4/1.e9,p5
return e
except ValueError:
return 1e20
sig = 1e-4
misfit = pymc.Normal('d',mu=theta,tau=1.0/(sig*sig),value=0,observed=True,trace=True)
model = dict(ref_rho=ref_rho, K_0=K_0, K_prime=K_prime, G_0=G_0, G_prime=G_prime, misfit=misfit)
things = ['ref_rho', 'K_0', 'K_prime', 'G_0', 'G_prime']
S = pymc.MAP(model)
S.fit( method = 'fmin')
rho, vphi, vs = calc_velocities(S.ref_rho.value, S.K_0.value, S.K_prime.value, S.G_0.value, S.G_prime.value)
plt.subplot(2,2,1)
plt.plot(seis_p/1.e9,vs/1000.,color='r',linestyle='-',marker='^',markerfacecolor='r',markersize=4)
plt.plot(seis_p/1.e9,seis_vs/1000.,color='k',linestyle='-',marker='v',markerfacecolor='k',markersize=4)
plt.ylim([4, 8])
plt.title("Vs (km/s)")
plt.subplot(2,2,2)
plt.plot(seis_p/1.e9,vphi/1000.,color='r',linestyle='-',marker='^',markerfacecolor='r',markersize=4)
plt.plot(seis_p/1.e9,seis_vphi/1000.,color='k',linestyle='-',marker='v',markerfacecolor='k',markersize=4)
plt.ylim([7, 12])
plt.title("Vphi (km/s)")
# plot density
plt.subplot(2,2,3)
plt.plot(seis_p/1.e9,rho/1000.,color='r',linestyle='-',marker='^',markerfacecolor='r',markersize=4,label='model 1')
plt.plot(seis_p/1.e9,seis_rho/1000.,color='k',linestyle='-',marker='v',markerfacecolor='k',markersize=4,label='ref')
plt.title("density (kg/m^3)")
plt.legend(loc='upper left')
plt.ylim([3, 7 ])
plt.show()
print "done"
|
gpl-2.0
|
leesavide/pythonista-docs
|
Documentation/matplotlib/mpl_examples/pylab_examples/plotfile_demo.py
|
13
|
1113
|
from pylab import plotfile, show, gca
import matplotlib.cbook as cbook
fname = cbook.get_sample_data('msft.csv', asfileobj=False)
fname2 = cbook.get_sample_data('data_x_x2_x3.csv', asfileobj=False)
# test 1; use ints
plotfile(fname, (0,5,6))
# test 2; use names
plotfile(fname, ('date', 'volume', 'adj_close'))
# test 3; use semilogy for volume
plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
# test 4; use semilogy for volume
plotfile(fname, (0,5,6), plotfuncs={5:'semilogy'})
#test 5; single subplot
plotfile(fname, ('date', 'open', 'high', 'low', 'close'), subplots=False)
# test 6; labeling, if no names in csv-file
plotfile(fname2, cols=(0,1,2), delimiter=' ',
names=['$x$', '$f(x)=x^2$', '$f(x)=x^3$'])
# test 7; more than one file per figure--illustrated here with a single file
plotfile(fname2, cols=(0, 1), delimiter=' ')
plotfile(fname2, cols=(0, 2), newfig=False, delimiter=' ') # use current figure
gca().set_xlabel(r'$x$')
gca().set_ylabel(r'$f(x) = x^2, x^3$')
# test 8; use bar for volume
plotfile(fname, (0,5,6), plotfuncs={5:'bar'})
show()
|
apache-2.0
|
Vimos/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
87
|
3903
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Invert the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
igabr/Metis_Projects_Chicago_2017
|
03-Project-McNulty/helper_functions.py
|
1
|
3521
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import seaborn as sns
import warnings
import re
from datetime import datetime
warnings.filterwarnings("ignore")
def difference_in_days(date1, date2):
"""
    The assumption here is that date2 > date1.
    You can always use abs(d2-d1).days to avoid negative answers.
"""
d1 = datetime.strptime(date1, '%m-%d-%Y')
d2 = datetime.strptime(date2, '%m-%d-%Y')
    return (d2-d1).days  # can use other methods to get difference in months, years etc.
def clean_df_cols(col_list):
"""
    Input: df.columns (a list/Index of column names).
"""
regex = r"[\!\"\'\,\(\)\[\]\*\.\\]"
subset=''
clean_col_names = []
for i in col_list:
clean_name = re.sub(regex, subset, i).strip()
clean_col_names.append(clean_name)
return clean_col_names
def unique_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
drop_cols = set()
cols = df.columns
for i in range(0, df.shape[1]):
for j in range(0, i+1):
drop_cols.add((cols[i], cols[j]))
return drop_cols
def extract_top_correlations(df, n=5):
"""
    Extract the most highly correlated column pairs.
    Default value is the top 5 pairs. Play with this if you have a threshold in mind.
"""
corr_matrix = df.corr().abs().unstack()
unique_cols = unique_pairs(df)
corr_matrix = corr_matrix.drop(labels=unique_cols).sort_values(ascending=False)
return corr_matrix[0:n]
def skew(mean, median):
if mean > median:
print("Right skew")
if mean < median:
print("Left Skew")
diff = mean - median
print("Diff between mean and median is : {}".format(diff))
def lookup_description(col_name):
return data_dict[col_name]
def percentage_missing(df):
"""
Calculates missing data for each column in a dataframe.
This function is informative.
Inputs:
- df: Pandas dataframe
Returns:
- None
"""
for c in df.columns:
missing_perc = (sum(pd.isnull(df[c]))/len(df))*100
if missing_perc > 0:
print("%.1f%% missing from: Column %s" %(missing_perc, c))
def pickle_object(obj, name):
"""
Short helper function to pickle any object in python
WARNING: NOT FOR USE WITH BEAUTIFULSOUP OBJECTS - see bs4 documentation.
Inputs:
- obj: Python object to be pickled
- name: resulting name of pickle file. NOTE: No need to add .pkl in the name
Returns:
- None. Function writes straight to disk.
"""
with open(name+".pkl", 'wb') as f:
pickle.dump(obj, f, protocol=4)
def unpickle_object(pkl):
"""
    Short helper function to unpickle any object in python
Inputs:
- pickle object from disk.
Returns:
- unpickled file now available in python namespace
"""
return pickle.load(open(pkl, 'rb'))
data_dict = unpickle_object("data_dict.pkl")
def plot_corr_matrix(data):
'''
Heatmap of correlation matrix
Inputs: dataframe
Returns: Heatmap
(Green + corr. Red - corr.)
'''
sns.set(font_scale=1.4)#for label size
ax = plt.axes()
sns.heatmap(data.corr(), square=True, cmap='RdYlGn')
ax.set_title('Correlation Matrix')
# functions below relate to logistic regression coef interpretation
def convert_to_prob(coef):
numerator = np.exp(coef)
denom = 1 + np.exp(coef)
return numerator/denom
def convert_to_odds(coef):
return np.exp(coef)
def compare_odds(odd1, odd2):
return odd1/odd2
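# A minimal, illustrative self-check of the helpers above. The random DataFrame
# and the coefficient value 0.7 are assumptions, not project data; the block
# only runs when the module is executed directly (and, because data_dict.pkl is
# loaded at import time above, only where that pickle file exists).
if __name__ == "__main__":
    demo = pd.DataFrame(np.random.randn(100, 3), columns=['a', 'b', 'c'])
    demo['d'] = 2*demo['a'] + 0.1*np.random.randn(100)  # nearly collinear with 'a'
    print(extract_top_correlations(demo, n=2))  # the ('a', 'd') pair should rank first
    # A log-odds coefficient of 0.7 corresponds to an odds ratio of exp(0.7) ~ 2.01
    # and a probability of ~0.67.
    print(convert_to_odds(0.7))
    print(convert_to_prob(0.7))
    print(compare_odds(convert_to_odds(0.7), convert_to_odds(0.2)))  # exp(0.5) ~ 1.65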
|
mit
|
sambitgaan/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py
|
69
|
40833
|
"""
GUI Neutral widgets
All of these widgets require you to predefine an Axes instance and
pass that as the first arg. matplotlib doesn't try to be too smart in
layout -- you have to figure out how wide and tall you want your Axes
to be to accommodate your widget.
"""
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
"""
some widgets, like the cursor, draw onto the canvas, and this is not
    desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
    drawing.  Use matplotlib.widgets.lock(someobj) to prevent other
    widgets from drawing while you are interacting with someobj.
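    A minimal usage sketch (assuming `canvas` is a FigureCanvas whose
    widgetlock attribute is an instance of this class, as used elsewhere in
    this module, and `mywidget` is any object acting as the lock owner):
        if canvas.widgetlock.available(mywidget):
            canvas.widgetlock(mywidget)         # grab the lock before drawing
            # ... draw ...
            canvas.widgetlock.release(mywidget)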
"""
def __init__(self):
self._owner = None
def __call__(self, o):
'reserve the lock for o'
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
'release the lock'
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
'drawing is available to o'
return not self.locked() or self.isowner(o)
def isowner(self, o):
'o owns the lock'
return self._owner is o
def locked(self):
'the lock is held'
return self._owner is not None
class Widget:
"""
OK, I couldn't resist; abstract base class for mpl GUI neutral
widgets
"""
drawon = True
eventson = True
class Button(Widget):
"""
A GUI neutral button
    The following attributes are accessible
ax - the Axes the button renders into
label - a text.Text instance
color - the color of the button when not hovering
hovercolor - the color of the button when hovering
Call "on_clicked" to connect to the button
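    A minimal usage sketch (the axes rectangle, label and callback below are
    illustrative, in the style of the other widget examples in this module):
        from matplotlib.widgets import Button
        from pylab import figure, axes, show
        fig = figure()
        axbutton = axes([0.7, 0.05, 0.2, 0.075])
        button = Button(axbutton, 'Press me')
        def on_press(event):
            print 'button pressed'
        button.on_clicked(on_press)
        show()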
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
ax is the Axes instance the button will be placed into
label is a string which is the button text
image if not None, is an image to place in the button -- can
be any legal arg to imshow (numpy array, matplotlib Image
instance, or PIL image)
color is the color of the button when not activated
hovercolor is the color of the button when the mouse is over
it
"""
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.ax = ax
ax.figure.canvas.mpl_connect('button_press_event', self._click)
ax.figure.canvas.mpl_connect('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if event.inaxes != self.ax: return
if not self.eventson: return
for cid, func in self.observers.items():
func(event)
def _motion(self, event):
if event.inaxes==self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon: self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this func with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class Slider(Widget):
"""
A slider representing a floating point range
The following attributes are defined
ax : the slider axes.Axes instance
val : the current slider value
vline : a Line2D instance representing the initial value
poly : A patch.Polygon instance which is the slider
valfmt : the format string for formatting the slider text
label : a text.Text instance, the slider label
closedmin : whether the slider is closed on the minimum
closedmax : whether the slider is closed on the maximum
slidermin : another slider - if not None, this slider must be > slidermin
slidermax : another slider - if not None, this slider must be < slidermax
dragging : allow for mouse dragging on slider
Call on_changed to connect to the slider event
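    A minimal usage sketch (axes rectangle, label and value range are
    illustrative):
        from matplotlib.widgets import Slider
        from pylab import figure, axes, show
        fig = figure()
        axfreq = axes([0.2, 0.05, 0.6, 0.05])
        sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=3.0)
        def update(val):
            print 'slider value', val
        sfreq.on_changed(update)
        show()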
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from valmin to valmax in axes ax;
valinit - the slider initial position
label - the slider label
valfmt - used to format the slider value
closedmin and closedmax - indicate whether the slider interval is closed
        slidermin and slidermax - can be used to constrain the value of
this slider to the values of other sliders.
additional kwargs are passed on to self.poly which is the
matplotlib.patches.Rectangle which draws the slider. See the
matplotlib.patches.Rectangle documentation for legal property
names (eg facecolor, edgecolor, alpha, ...)
"""
self.ax = ax
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)
self.vline = ax.axvline(valinit,0,1, color='r', lw=1)
self.valfmt=valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
ax.figure.canvas.mpl_connect('button_press_event', self._update)
if dragging:
ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
def _update(self, event):
'update the slider position'
if event.button !=1: return
if event.inaxes != self.ax: return
val = event.xdata
if not self.closedmin and val<=self.valmin: return
if not self.closedmax and val>=self.valmax: return
if self.slidermin is not None:
if val<=self.slidermin.val: return
if self.slidermax is not None:
if val>=self.slidermax.val: return
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[-1] = val, 0
xy[-2] = val, 1
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
        When the slider value is changed, call this func with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
class CheckButtons(Widget):
"""
    A GUI neutral set of check buttons
The following attributes are exposed
ax - the Axes instance the buttons are in
labels - a list of text.Text instances
lines - a list of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have set_visible(False) when
box is not checked
rectangles - a list of patch.Rectangle instances
Connect to the CheckButtons with the on_clicked method
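    A minimal usage sketch (labels and initial states are illustrative):
        from matplotlib.widgets import CheckButtons
        from pylab import figure, axes, show
        fig = figure()
        rax = axes([0.02, 0.4, 0.25, 0.2])
        check = CheckButtons(rax, ('red', 'blue', 'green'), (True, False, True))
        def toggle(label):
            print 'toggled', label
        check.on_clicked(toggle)
        show()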
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
actives is a len(buttons) list of booleans indicating whether
the button is active
"""
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels)>1:
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
'solid_capstyle':'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy/2., dy/2.
x, y = 0.05, y-h/2.
p = Rectangle(xy=(x,y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x+w], [y+h, y], **lineparams)
l2 = Line2D([x, x+w], [y, y+h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1,l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
for p,t,lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y) ):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class RadioButtons(Widget):
"""
A GUI neutral radio button
The following attributes are exposed
ax - the Axes instance the buttons are in
activecolor - the color of the button when clicked
labels - a list of text.Text instances
circles - a list of patch.Circle instances
Connect to the RadioButtons with the on_clicked method
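    A minimal usage sketch (labels are illustrative):
        from matplotlib.widgets import RadioButtons
        from pylab import figure, axes, show
        fig = figure()
        rax = axes([0.02, 0.4, 0.25, 0.2])
        radio = RadioButtons(rax, ('2 Hz', '4 Hz', '8 Hz'), active=0)
        def select(label):
            print 'selected', label
        radio.on_clicked(select)
        show()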
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
active is the index into labels for the button that is active
activecolor is the color of the button when clicked
"""
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class SubplotTool(Widget):
"""
    A tool to adjust the subplot params of fig
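    A minimal usage sketch (both figures are created here purely for
    illustration; the toolbar's "configure subplots" button normally creates
    the tool figure for you):
        from matplotlib.widgets import SubplotTool
        from pylab import figure, show
        fig = figure()
        fig.add_subplot(111).plot(range(10))
        toolfig = figure(figsize=(6, 3))
        tool = SubplotTool(fig, toolfig)
        show()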
"""
def __init__(self, targetfig, toolfig):
"""
targetfig is the figure to adjust
        toolfig is the figure to embed the subplot tool into.  If
None, a default pylab figure will be created. If you are
using this from the GUI
"""
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
class Cursor:
"""
    A horizontal and vertical line that span the axes and move with
    the pointer.  You can turn off the hline or vline respectively with
    the attributes
      horizOn =True|False: controls visibility of the horizontal line
      vertOn =True|False: controls visibility of the vertical line
    And the visibility of the cursor itself with the visible attribute
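    A minimal usage sketch (the plotted data and line properties are
    illustrative):
        from matplotlib.widgets import Cursor
        from pylab import figure, show
        fig = figure()
        ax = fig.add_subplot(111)
        ax.plot(range(100))
        cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
        show()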
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
class MultiCursor:
"""
Provide a vertical line cursor shared between multiple axes
from matplotlib.widgets import MultiCursor
from pylab import figure, show, nx
t = nx.arange(0.0, 2.0, 0.01)
s1 = nx.sin(2*nx.pi*t)
s2 = nx.sin(4*nx.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
for line in self.lines: line.set_visible(False)
def onmove(self, event):
if event.inaxes is None: return
if not self.canvas.widgetlock.available(self): return
self.needclear = True
if not self.visible: return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector:
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
onmove_callback is an optional callback that will be called on mouse move
with the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):
"""
Create a span selector in ax. When a selection is made, clear
the span and call onselect with
onselect(vmin, vmax)
and clear the span.
direction must be 'horizontal' or 'vertical'
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.ax = None
self.canvas = None
self.visible = True
self.cids=[]
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
self.canvas = ax.figure.canvas
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
self.cids.append(self.canvas.mpl_connect('button_press_event', self.press))
self.cids.append(self.canvas.mpl_connect('button_release_event', self.release))
self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background))
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.pressv is None or (self.ignore(event) and not self.buttonDown): return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event): return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', DeprecationWarning)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector:
"""
Select a min/max range of the x axes for a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
fig = figure
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data'):
"""
Create a selector in ax. When a selection is made, clear
the span and call onselect with
onselect(pos_1, pos_2)
        and clear the drawn box/line. These pos_i are arrays of length 2
containing the x- and y-coordinate.
If minspanx is not None then events smaller than minspanx
        in x direction are ignored (it's the same for y).
The rect is drawn with rectprops; default
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with lineprops; default
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
        Use drawtype if you want the mouse to draw a line, a box or nothing
        between click and actual position, by setting
        drawtype = 'line', drawtype='box' or drawtype = 'none'.
spancoords is one of 'data' or 'pixels'. If 'data', minspanx
and minspanx will be interpreted in the same coordinates as
        the x and y axis, if 'pixels', they are in pixels
"""
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If RectangleSelector is not active :
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress == None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
        # make the drawn box/line visible and get the click coordinates,
# button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
        if (self.drawtype=='box') and (xproblems or yproblems):
            """Box too small"""    # check that the drawn distance (if it exists)
            return                 # is not too small in either x or y direction
        if (self.drawtype=='line') and (xproblems and yproblems):
            """Line too small"""   # check that the drawn distance (if it exists)
            return                 # is not too small in either x or y direction
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
        self.eventrelease = None   # initial values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event): return
x,y = event.xdata, event.ydata # actual position (with
# (button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
""" Use this to activate / deactivate the RectangleSelector
        from your program with a boolean variable 'active'.
"""
self.active = active
def get_active(self):
""" to get status of active mode (boolean variable)"""
return self.active
class Lasso(Widget):
def __init__(self, ax, xy, callback=None, useblit=True):
self.axes = ax
self.figure = ax.figure
self.canvas = self.figure.canvas
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.axes.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.axes.add_line(self.line)
self.callback = callback
self.cids = []
self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease))
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
def onrelease(self, event):
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.axes.lines.remove(self.line)
self.verts = None
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def onmove(self, event):
if self.verts is None: return
if event.inaxes != self.axes: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.axes.draw_artist(self.line)
self.canvas.blit(self.axes.bbox)
else:
self.canvas.draw_idle()
|
agpl-3.0
|
LiaoPan/scikit-learn
|
examples/linear_model/plot_logistic_path.py
|
349
|
1195
|
#!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
Nyker510/scikit-learn
|
sklearn/ensemble/voting_classifier.py
|
31
|
7929
|
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
    def _collect_probas(self, X):
        """Collect results from clf.predict_proba calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilties calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = self.named_estimators.copy()
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
|
bsd-3-clause
|
wkfwkf/statsmodels
|
statsmodels/examples/ex_kernel_regression_sigtest.py
|
34
|
3177
|
# -*- coding: utf-8 -*-
"""Kernel Regression and Significance Test
Warning: SLOW, 11 minutes on my computer
Created on Thu Jan 03 20:20:47 2013
Author: Josef Perktold
results - this version
----------------------
>>> exec(open('ex_kernel_regression_censored1.py').read())
bw
[ 0.3987821 0.50933458]
[0.39878209999999997, 0.50933457999999998]
sig_test - default
Not Significant
pvalue
0.11
test statistic 0.000434305313291
bootstrap critical values
[ 0.00043875 0.00046808 0.0005064 0.00054151]
sig_test - pivot=True, nboot=200, nested_res=50
pvalue
0.01
test statistic 6.17877171579
bootstrap critical values
[ 5.5658345 5.74761076 5.87386858 6.46012041]
times: 8.34599995613 20.6909999847 666.373999834
"""
from __future__ import print_function
import time
import numpy as np
import statsmodels.nonparametric.api as nparam
import statsmodels.nonparametric.kernel_regression as smkr
if __name__ == '__main__':
t0 = time.time()
#example from test file
nobs = 200
np.random.seed(1234)
C1 = np.random.normal(size=(nobs, ))
C2 = np.random.normal(2, 1, size=(nobs, ))
noise = np.random.normal(size=(nobs, ))
Y = 0.3 +1.2 * C1 - 0.9 * C2 + noise
#self.write2file('RegData.csv', (Y, C1, C2))
#CODE TO PRODUCE BANDWIDTH ESTIMATION IN R
#library(np)
#data <- read.csv('RegData.csv', header=FALSE)
#bw <- npregbw(formula=data$V1 ~ data$V2 + data$V3,
# bwmethod='cv.aic', regtype='lc')
model = nparam.KernelReg(endog=[Y], exog=[C1, C2],
reg_type='lc', var_type='cc', bw='aic')
mean, marg = model.fit()
#R_bw = [0.4017893, 0.4943397] # Bandwidth obtained in R
bw_expected = [0.3987821, 0.50933458]
#npt.assert_allclose(model.bw, bw_expected, rtol=1e-3)
print('bw')
print(model.bw)
print(bw_expected)
print('\nsig_test - default')
print(model.sig_test([1], nboot=100))
t1 = time.time()
res0 = smkr.TestRegCoefC(model, [1])
print('pvalue')
print((res0.t_dist >= res0.test_stat).mean())
print('test statistic', res0.test_stat)
print('bootstrap critical values')
probs = np.array([0.9, 0.95, 0.975, 0.99])
bsort0 = np.sort(res0.t_dist)
nrep0 = len(bsort0)
print(bsort0[(probs * nrep0).astype(int)])
t2 = time.time()
print('\nsig_test - pivot=True, nboot=200, nested_res=50')
res1 = smkr.TestRegCoefC(model, [1], pivot=True, nboot=200, nested_res=50)
print('pvalue')
print((res1.t_dist >= res1.test_stat).mean())
print('test statistic', res1.test_stat)
print('bootstrap critical values')
probs = np.array([0.9, 0.95, 0.975, 0.99])
bsort1 = np.sort(res1.t_dist)
nrep1 = len(bsort1)
print(bsort1[(probs * nrep1).astype(int)])
t3 = time.time()
print('times:', t1-t0, t2-t1, t3-t2)
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.plot(x, y, 'o', alpha=0.5)
# ax.plot(x, y_cens, 'o', alpha=0.5)
# ax.plot(x, y_true, lw=2, label='DGP mean')
# ax.plot(x, sm_mean, lw=2, label='model 0 mean')
# ax.plot(x, mean2, lw=2, label='model 2 mean')
# ax.legend()
#
# plt.show()
|
bsd-3-clause
|
SKA-ScienceDataProcessor/algorithm-reference-library
|
deprecated_code/workflows/mpi/imaging-pipelines_arlexecute.py
|
1
|
14745
|
# coding: utf-8
# # Pipeline processing using arlexecute workflows.
#
# This notebook demonstrates the continuum imaging and ICAL pipelines. These are based on ARL functions wrapped up as SDP workflows using the arlexecute class.
# In[1]:
#get_ipython().magic('matplotlib inline')
import os
import sys
sys.path.append(os.path.join('..', '..'))
from data_models.parameters import arl_path
results_dir = arl_path('./results/dask')
#results_dir = './workflows/mpi/results/dask'
#from matplotlib import pylab
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'rainbow'
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
#from matplotlib import pyplot as plt
from data_models.polarisation import PolarisationFrame
from wrappers.serial.calibration.calibration import solve_gaintable
from wrappers.serial.calibration.operations import apply_gaintable
from wrappers.serial.calibration.calibration_control import create_calibration_controls
from wrappers.serial.visibility.base import create_blockvisibility
from wrappers.serial.visibility.coalesce import convert_blockvisibility_to_visibility, convert_visibility_to_blockvisibility
from wrappers.serial.skycomponent.operations import create_skycomponent
from wrappers.serial.image.deconvolution import deconvolve_cube
#from wrappers.serial.image.operations import show_image, export_image_to_fits, qa_image
from wrappers.serial.image.operations import export_image_to_fits, qa_image
from wrappers.serial.visibility.iterators import vis_timeslice_iter
from wrappers.serial.simulation.testing_support import create_low_test_image_from_gleam
from processing_components.simulation.configurations import create_named_configuration
from wrappers.serial.imaging.base import predict_2d, create_image_from_visibility, advise_wide_field
from workflows.arlexecute.imaging.imaging_arlexecute import invert_list_arlexecute_workflow, predict_list_arlexecute_workflow, deconvolve_list_arlexecute_workflow
from workflows.arlexecute.simulation.simulation_arlexecute import simulate_list_arlexecute_workflow, corrupt_list_arlexecute_workflow
from workflows.arlexecute.pipelines.pipeline_arlexecute import continuum_imaging_list_arlexecute_workflow, ical_list_arlexecute_workflow
from wrappers.arlexecute.execution_support.arlexecute import arlexecute
from wrappers.arlexecute.execution_support.dask_init import findNodes, get_dask_Client
import time
import pprint
pp = pprint.PrettyPrinter()
import logging
import argparse
def init_logging():
log = logging.getLogger()
print('Results dir: %s' % results_dir)
logging.basicConfig(filename='%s/imaging-pipeline.log' % results_dir,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
parser = argparse.ArgumentParser(description='Imaging pipelines in Dask.')
parser.add_argument('--nfreqwin', type=int, nargs='?', default=7,
help='The number of frequency windows')
parser.add_argument('--schedulerfile', type=str, nargs='?',
default='./scheduler.json',
help='The scheduler file if starting with dask-mpi')
args = parser.parse_args()
# We will use dask
# In[2]:
threads_per_worker=16
memory=384
nworkers=1
filename=args.schedulerfile
client = get_dask_Client(threads_per_worker=threads_per_worker,
processes = threads_per_worker == 1,
memory_limit=memory * 1024 * 1024 * 1024,
n_workers=nworkers, with_file=True,
scheduler_file=filename)
arlexecute.set_client(client)
nodes = findNodes(arlexecute.client)
print("Defined %d workers on %d nodes" % (nworkers, len(nodes)))
print("Workers are: %s" % str(nodes))
#arlexecute.set_client(use_dask=True)
init_logging()
log = logging.getLogger()
logging.info("Starting imaging-pipeline")
# Initialise logging on the workers. This appears to only work using
# the process scheduler.
arlexecute.run(init_logging)
# In[3]:
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'Greys'
# We create a graph to make the visibility. The parameter rmax determines the distance of the furthest antenna/stations used. All other parameters are determined from this number.
# In[4]:
#nfreqwin=7
nfreqwin=args.nfreqwin
#ntimes=5
rmax=300.0
#frequency=numpy.linspace(1.0e8,1.2e8,nfreqwin)
ntimes=11
frequency=numpy.linspace(0.9e8,1.1e8,nfreqwin)
channel_bandwidth=numpy.array(nfreqwin*[frequency[1]-frequency[0]])
times = numpy.linspace(-numpy.pi/3.0, numpy.pi/3.0, ntimes)
#phasecentre=SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg, frame='icrs', equinox='J2000')
phasecentre=SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
bvis_list=simulate_list_arlexecute_workflow('LOWBD2',
frequency=frequency,
channel_bandwidth=channel_bandwidth,
times=times,
phasecentre=phasecentre,
order='frequency',
rmax=rmax)
print('%d elements in vis_list' % len(bvis_list))
log.info('About to make visibility')
vis_list = [arlexecute.execute(convert_blockvisibility_to_visibility)(bv) for bv in bvis_list]
vis_list = arlexecute.compute(vis_list, sync=True)
# In[5]:
wprojection_planes=1
advice_low=advise_wide_field(vis_list[0], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
advice_high=advise_wide_field(vis_list[-1], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
vis_slices = advice_low['vis_slices']
npixel=advice_high['npixels2']
cellsize=min(advice_low['cellsize'], advice_high['cellsize'])
# Now make a graph to fill with a model drawn from GLEAM
# In[6]:
gleam_model = [arlexecute.execute(create_low_test_image_from_gleam)(npixel=npixel,
frequency=[frequency[f]],
channel_bandwidth=[channel_bandwidth[f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"),
flux_limit=1.0,
applybeam=True)
for f, freq in enumerate(frequency)]
log.info('About to make GLEAM model')
gleam_model = arlexecute.compute(gleam_model, sync=True)
future_gleam_model = arlexecute.scatter(gleam_model)
# In[7]:
log.info('About to run predict to get predicted visibility')
start=time.time()
future_vis_graph = arlexecute.scatter(vis_list)
predicted_vislist = predict_list_arlexecute_workflow(future_vis_graph, gleam_model,
context='wstack', vis_slices=vis_slices)
predicted_vislist = arlexecute.compute(predicted_vislist, sync=True)
end=time.time()
corrupted_vislist = corrupt_list_arlexecute_workflow(predicted_vislist, phase_error=1.0)
log.info('About to run corrupt to get corrupted visibility')
corrupted_vislist = arlexecute.compute(corrupted_vislist, sync=True)
future_predicted_vislist=arlexecute.scatter(predicted_vislist)
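# The corrupted visibilities (phase errors applied above) are what the ICAL run below has to
# self-calibrate; the uncorrupted predictions feed the imaging-only steps.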
# Get the LSM. This is currently blank.
# In[8]:
print('predict finished in %f seconds'%(end-start),flush=True)
model_list = [arlexecute.execute(create_image_from_visibility)(vis_list[f],
npixel=npixel,
frequency=[frequency[f]],
channel_bandwidth=[channel_bandwidth[f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"))
for f, freq in enumerate(frequency)]
# In[9]:
start=time.time()
dirty_list = invert_list_arlexecute_workflow(future_predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_arlexecute_workflow(future_predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
# Create and execute graphs to make the dirty image and PSF
# In[10]:
log.info('About to run invert to get dirty image')
dirty_list = arlexecute.compute(dirty_list, sync=True)
psf_list = arlexecute.compute(psf_list, sync=True)
end=time.time()
print('invert finished in %f seconds'%(end-start),flush=True)
dirty = dirty_list[0][0]
#show_image(dirty, cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
print(qa_image(dirty))
export_image_to_fits(dirty, '%s/imaging-dirty.fits'
%(results_dir))
log.info('About to run invert to get PSF')
psf = psf_list[0][0]
#show_image(psf, cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
print(qa_image(psf))
export_image_to_fits(psf, '%s/imaging-psf.fits'
%(results_dir))
# Now deconvolve using msclean
# In[11]:
log.info('About to run deconvolve')
start=time.time()
deconvolve_list, _ = deconvolve_list_arlexecute_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
deconvolved = arlexecute.compute(deconvolve_list, sync=True)
#show_image(deconvolved[0], cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
end=time.time()
print('deconvolve finished in %f sec'%(end-start))
# In[12]:
start=time.time()
continuum_imaging_list = continuum_imaging_list_arlexecute_workflow(future_predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
# In[13]:
log.info('About to run continuum imaging')
result=arlexecute.compute(continuum_imaging_list, sync=True)
end=time.time()
print('continuum imaging finished in %f sec.'%(end-start),flush=True)
deconvolved = result[0][0]
residual = result[1][0]
restored = result[2][0]
#f=show_image(deconvolved, title='Clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(deconvolved, context='Clean image - no selfcal'))
#plt.show()
#f=show_image(restored, title='Restored clean image - no selfcal',
#cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(restored, context='Restored clean image - no selfcal'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_continuum_imaging_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image - no selfcal'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_continuum_imaging_residual.fits'
%(results_dir))
# In[14]:
controls = create_calibration_controls()
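# 'first_selfcal' appears to set the first major cycle at which each calibration term is
# solved for; T, G and B presumably correspond to the usual phase, gain and bandpass terms
# of the calibration model.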
controls['T']['first_selfcal'] = 1
controls['G']['first_selfcal'] = 3
controls['B']['first_selfcal'] = 4
controls['T']['timeslice'] = 'auto'
controls['G']['timeslice'] = 'auto'
controls['B']['timeslice'] = 1e5
pp.pprint(controls)
# In[15]:
start=time.time()
ical_list = ical_list_arlexecute_workflow(corrupted_vislist,
model_imagelist=model_list,
context='wstack',
calibration_context = 'TG',
controls=controls,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8,
deconvolve_overlap=16,
deconvolve_taper='tukey',
vis_slices=ntimes,
timeslice='auto',
global_solution=False,
psf_support=64,
do_selfcal=True)
# In[16]:
log.info('About to run ical')
ical_list = arlexecute.compute(ical_list, sync=True)
end=time.time()
print('ical finished in %f sec.'%(end-start),flush=True)
deconvolved = ical_list[0][0]
residual = ical_list[1][0]
restored = ical_list[2][0]
#f=show_image(deconvolved, title='Clean image', cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(deconvolved, context='Clean image'))
#plt.show()
#f=show_image(restored, title='Restored clean image', cm='Greys', vmax=1.0,
# vmin=-0.1)
print(qa_image(restored, context='Restored clean image'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_ical_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_ical_residual.fits'
%(results_dir))
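# At this point the results directory holds the dirty image, PSF, and the continuum-imaging
# and ICAL restored/residual FITS files for comparison.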
# In[ ]:
|
apache-2.0
|
holdenk/spark
|
python/pyspark/sql/dataframe.py
|
4
|
99864
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import warnings
from functools import reduce
from html import escape as html_escape
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3.0
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not; we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ when eager evaluation is enabled.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Examples
--------
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Examples
--------
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.",
FutureWarning
)
self._jdf.createOrReplaceTempView(name)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
Throws :class:`TempTableAlreadyExistsException` if the view name already exists in the
catalog.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
Throws :class:`TempTableAlreadyExistsException` if the view name already exists in the
catalog.
.. versionadded:: 2.1.0
Examples
--------
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
.. versionadded:: 2.2.0
Examples
--------
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
def printSchema(self):
"""Prints out the schema in the tree format.
.. versionadded:: 1.3.0
Examples
--------
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
.. versionadded:: 1.3.0
Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
When this is a string and ``mode`` is not specified, the string is treated as the
``mode``.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
"""
if extended is not None and mode is not None:
raise Exception("extended and mode should not be set together.")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
argtypes = [
str(type(arg)) for arg in [extended, mode] if arg is not None]
raise TypeError(
"extended (optional) and mode (optional) should be a string "
"and bool; however, got [%s]." % ", ".join(argtypes))
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
elif is_extended_as_mode:
explain_mode = extended
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
return self._jdf.isStreaming()
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool, optional
If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and align cells right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
.. versionadded:: 2.1.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
Parameters
----------
eventTime : str or :class:`Column`
the name of the column that contains the event time of the row.
delayThreshold : str
the minimum delay to wait for late data to arrive, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
Notes
-----
This API is evolving.
>>> from pyspark.sql.functions import timestamp_seconds
>>> sdf.select(
... 'name',
... timestamp_seconds(sdf.time).alias('time')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.count()
2
"""
return int(self._jdf.count())
def collect(self):
"""Returns all the records as a list of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
Examples
--------
>>> list(df.toLocalIterator())
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
def limit(self, num):
"""Limits the result count to the number specified.
.. versionadded:: 1.3.0
Examples
--------
>>> df.limit(1).collect()
[Row(age=2, name='Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.take(2)
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
return self.limit(num).collect()
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
Examples
--------
>>> df.tail(1)
[Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK_DESER):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified, it defaults to `MEMORY_AND_DISK_DESER`.
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
.. versionadded:: 2.1.0
Examples
--------
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
.. versionadded:: 1.4.0
Parameters
----------
numPartitions : int
specify the target number of partitions
Examples
--------
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
Parameters
----------
numPartitions : int or :class:`Column`
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Examples
--------
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
.. versionadded:: 2.4.0
Parameters
----------
numPartitions : int or :class:`Column`
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Notes
-----
Due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
`fraction` is required; `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise ValueError("key must be float, int, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Examples
--------
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), int(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
def dtypes(self):
"""Returns all column names and their data types as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
def columns(self):
"""Returns all column names as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
.. versionadded:: 2.3.0
Parameters
----------
colName : str
string, column name specified as a regex.
Examples
--------
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, str):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
.. versionadded:: 1.3.0
Parameters
----------
alias : str
an alias name to be set for the :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name='Bob', name='Bob', age=5), Row(name='Alice', name='Alice', age=2)]
"""
assert isinstance(alias, str), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
.. versionadded:: 2.1.0
Parameters
----------
other : :class:`DataFrame`
Right side of the cartesian product.
Examples
--------
>>> df.select("age", "name").collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df2.select("name", "height").collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name='Alice', height=80), Row(age=2, name='Alice', height=85),
Row(age=5, name='Bob', height=80), Row(age=5, name='Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Examples
--------
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name='Bob', height=85), Row(name='Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85), Row(name='Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name='Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name='Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
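# Multiple Column conditions are combined with AND into a single join expression.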
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
.. versionadded:: 1.3.1
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Use summary for expanded statistics and control over which statistics to compute.
Examples
--------
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
See Also
--------
DataFrame.summary
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See Also
--------
DataFrame.describe
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. versionadded:: 1.3.0
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
If n is None, return a single :class:`Row`.
Otherwise, return a list of :class:`Row` (at most ``n`` rows).
Examples
--------
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
def first(self):
"""Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.first()
Row(age=2, name='Alice')
"""
return self.head()
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name='Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name='Bob')]
"""
if isinstance(item, str):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Examples
--------
>>> df.select('*').collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.select('name', 'age').collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name='Alice', age=12), Row(name='Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
Examples
--------
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
Examples
--------
>>> df.filter(df.age > 3).collect()
[Row(age=5, name='Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name='Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name='Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name='Alice')]
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
Examples
--------
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name='Alice', age=2, count=1), Row(name='Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy().agg()``).
.. versionadded:: 1.3.0
Examples
--------
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
def unionByName(self, other, allowMissingColumns=False):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
.. versionadded:: 2.3.0
Examples
--------
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
When the parameter `allowMissingColumns` is ``True``, the set of column names
in this and other :class:`DataFrame` can differ; missing columns will be filled with null.
Further, the missing columns of this :class:`DataFrame` will be added at the end
in the schema of the union result:
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|null|
|null| 4| 5| 6|
+----+----+----+----+
.. versionchanged:: 3.1.0
Added optional argument `allowMissingColumns` to specify whether to allow
missing columns.
"""
return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
        duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
        be, and the system will accordingly limit the state. In addition, data older than the
        watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
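    # Usage sketch for the streaming case described in the docstring above
    # (illustrative only; the column names "eventTime" and "id" are assumptions):
    #
    #   streaming_df.withWatermark("eventTime", "10 minutes") \
    #               .dropDuplicates(["id", "eventTime"])
    #
    # The watermark bounds how much deduplication state Spark has to keep.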
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
thresh: int, optional
default None
            If specified, drop rows that have fewer than `thresh` non-null values.
This overwrites the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Examples
--------
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise ValueError("value should be a float, int, string, bool or dict")
        # Note that bool is a subclass of int, so isinstance(value, int) is True
        # for bools, but we don't want to convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
        or strings. `value` can be None. When replacing, the new value will be cast
        to the type of the existing column.
        For numeric replacements, all values to be replaced should have unique
        floating point representations. In case of conflicts (for example with `{42: -1, 42.0: 1}`),
        an arbitrary replacement will be used.
.. versionadded:: 1.4.0
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(str)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(str)
all_of_numeric = all_of((float, int))
# Validate input types
valid_types = (bool, float, int, str, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a bool, float, int, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a bool, float, int, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, str))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, str)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, str)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, str):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
        algorithm (with some speed optimizations). The algorithm was first
        presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
.. versionadded:: 2.0.0
Parameters
----------
col: str, tuple or list
Can be a single column name, or a list of names for multiple columns.
.. versionchanged:: 2.2
Added support for multiple columns.
probabilities : list or tuple
a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
relativeError : float
The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
Returns
-------
list
the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
"""
if not isinstance(col, (str, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, str):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
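    # Usage sketch (illustrative; the exact output depends on the data): with the
    # doctest DataFrame `df` built in _test() below,
    #   df.approxQuantile("age", [0.25, 0.5, 0.75], 0.05)
    # returns a list of three floats, one per requested probability, and passing
    # ["age"] instead of "age" would wrap that list in an outer list.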
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
method : str, optional
The correlation method. Currently only supports "pearson"
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column. Distinct items will make the first item of
each row.
col2 : str
The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
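    # Sketch of the output shape described in the docstring above (hypothetical
    # column names): for a DataFrame with columns "key" and "value",
    #   df.crosstab("key", "value")
    # yields one row per distinct "key", a first column named "key_value", and
    # one count column per distinct "value".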
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
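    # Usage sketch (hypothetical column names): find items that appear in at
    # least 25% of rows for two columns at once.
    #   df.freqItems(["a", "b"], support=0.25)
    # The result is a single-row DataFrame with one array column per input
    # column; treat the exact output column naming as an assumption here.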
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
Examples
--------
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name='Alice', age2=4), Row(age=5, name='Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
.. versionadded:: 1.3.0
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Examples
--------
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name='Alice'), Row(age2=5, name='Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Examples
--------
>>> df.drop('age').collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.drop(df.age).collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name='Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name='Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, str):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, str):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def toDF(self, *cols):
"""Returns a new :class:`DataFrame` that with new specified column names
Parameters
----------
cols : str
new column names
Examples
--------
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2='Alice'), Row(f1=5, f2='Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def transform(self, func):
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
therefore return same results.
.. versionadded:: 3.1.0
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
        This API can compare both :class:`DataFrame`\\s very quickly but can still return
        `False` for :class:`DataFrame`\\s that return the same results, for instance, from
        different plans. Such false negatives are acceptable for use cases such as caching.
This API is a developer API.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise ValueError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
def inputFiles(self):
"""
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.read.load("examples/src/main/resources/people.json", format="json")
>>> len(df.inputFiles())
1
"""
return list(self._jdf.inputFiles())
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def writeTo(self, table):
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(height=80, name='Tom'), Row(height=85, name='Bob')]).toDF()
globs['df3'] = sc.parallelize([Row(age=2, name='Alice'),
Row(age=5, name='Bob')]).toDF()
globs['df4'] = sc.parallelize([Row(age=10, height=80, name='Alice'),
Row(age=5, height=None, name='Bob'),
Row(age=None, height=None, name='Tom'),
Row(age=None, height=None, name=None)]).toDF()
globs['df5'] = sc.parallelize([Row(age=10, name='Alice', spy=False),
Row(age=5, name='Bob', spy=None),
Row(age=None, name='Mallory', spy=True)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
mraspaud/dask
|
dask/tests/test_core.py
|
4
|
6355
|
from collections import namedtuple
import pytest
import pickle
from dask.utils_test import GetFunctionTestMixin, inc, add
from dask import core
from dask.core import (istask, get_dependencies, get_deps, flatten, subs,
preorder_traversal, literal, quote, has_tasks)
def contains(a, b):
"""
>>> contains({'x': 1, 'y': 2}, {'x': 1})
True
>>> contains({'x': 1, 'y': 2}, {'z': 3})
False
"""
return all(a.get(k) == v for k, v in b.items())
def test_istask():
assert istask((inc, 1))
assert not istask(1)
assert not istask((1, 2))
f = namedtuple('f', ['x', 'y'])
assert not istask(f(sum, 2))
def test_has_tasks():
dsk = {'a': [1, 2, 3],
'b': 'a',
'c': [1, (inc, 1)],
'd': [(sum, 'a')],
'e': ['a', 'b'],
'f': [['a', 'b'], 2, 3]}
assert not has_tasks(dsk, dsk['a'])
assert has_tasks(dsk, dsk['b'])
assert has_tasks(dsk, dsk['c'])
assert has_tasks(dsk, dsk['d'])
assert has_tasks(dsk, dsk['e'])
assert has_tasks(dsk, dsk['f'])
def test_preorder_traversal():
t = (add, 1, 2)
assert list(preorder_traversal(t)) == [add, 1, 2]
t = (add, (add, 1, 2), (add, 3, 4))
assert list(preorder_traversal(t)) == [add, add, 1, 2, add, 3, 4]
t = (add, (sum, [1, 2]), 3)
assert list(preorder_traversal(t)) == [add, sum, list, 1, 2, 3]
class TestGet(GetFunctionTestMixin):
get = staticmethod(core.get)
class TestRecursiveGet(GetFunctionTestMixin):
get = staticmethod(lambda d, k: core.get(d, k, recursive=True))
def test_get_stack_limit(self):
# will blow stack in recursive mode
pass
def test_GetFunctionTestMixin_class():
class TestCustomGetFail(GetFunctionTestMixin):
get = staticmethod(lambda x, y: 1)
custom_testget = TestCustomGetFail()
pytest.raises(AssertionError, custom_testget.test_get)
class TestCustomGetPass(GetFunctionTestMixin):
get = staticmethod(core.get)
custom_testget = TestCustomGetPass()
custom_testget.test_get()
def test_get_dependencies_nested():
dsk = {'x': 1, 'y': 2,
'z': (add, (inc, [['x']]), 'y')}
assert get_dependencies(dsk, 'z') == set(['x', 'y'])
assert sorted(get_dependencies(dsk, 'z', as_list=True)) == ['x', 'y']
def test_get_dependencies_empty():
dsk = {'x': (inc,)}
assert get_dependencies(dsk, 'x') == set()
assert get_dependencies(dsk, 'x', as_list=True) == []
def test_get_dependencies_list():
dsk = {'x': 1, 'y': 2, 'z': ['x', [(inc, 'y')]]}
assert get_dependencies(dsk, 'z') == set(['x', 'y'])
assert sorted(get_dependencies(dsk, 'z', as_list=True)) == ['x', 'y']
def test_get_dependencies_task():
dsk = {'x': 1, 'y': 2, 'z': ['x', [(inc, 'y')]]}
assert get_dependencies(dsk, task=(inc, 'x')) == set(['x'])
assert get_dependencies(dsk, task=(inc, 'x'), as_list=True) == ['x']
def test_get_dependencies_nothing():
with pytest.raises(ValueError):
get_dependencies({})
def test_get_dependencies_many():
dsk = {'a': [1, 2, 3],
'b': 'a',
'c': [1, (inc, 1)],
'd': [(sum, 'c')],
'e': ['a', 'b', 'zzz'],
'f': [['a', 'b'], 2, 3]}
tasks = [dsk[k] for k in ('d', 'f')]
s = get_dependencies(dsk, task=tasks)
assert s == {'a', 'b', 'c'}
s = get_dependencies(dsk, task=tasks, as_list=True)
assert sorted(s) == ['a', 'b', 'c']
s = get_dependencies(dsk, task=[])
assert s == set()
s = get_dependencies(dsk, task=[], as_list=True)
assert s == []
def test_get_deps():
"""
>>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dependencies, dependents = get_deps(dsk)
>>> dependencies
{'a': set([]), 'c': set(['b']), 'b': set(['a'])}
>>> dependents
{'a': set(['b']), 'c': set([]), 'b': set(['c'])}
"""
dsk = {'a': [1, 2, 3],
'b': 'a',
'c': [1, (inc, 1)],
'd': [(sum, 'c')],
'e': ['b', 'zzz', 'b'],
'f': [['a', 'b'], 2, 3]}
dependencies, dependents = get_deps(dsk)
assert dependencies == {'a': set(),
'b': {'a'},
'c': set(),
'd': {'c'},
'e': {'b'},
'f': {'a', 'b'},
}
assert dependents == {'a': {'b', 'f'},
'b': {'e', 'f'},
'c': {'d'},
'd': set(),
'e': set(),
'f': set(),
}
def test_flatten():
assert list(flatten(())) == []
assert list(flatten('foo')) == ['foo']
def test_subs():
assert subs((sum, [1, 'x']), 'x', 2) == (sum, [1, 2])
assert subs((sum, [1, ['x']]), 'x', 2) == (sum, [1, [2]])
class MutateOnEq(object):
hit_eq = 0
def __eq__(self, other):
self.hit_eq += 1
return False
def test_subs_no_key_data_eq():
# Numpy throws a deprecation warning on bool(array == scalar), which
# pollutes the terminal. This test checks that `subs` never tries to
    # compare keys (scalars) with values (which could be arrays).
a = MutateOnEq()
subs(a, 'x', 1)
assert a.hit_eq == 0
subs((add, a, 'x'), 'x', 1)
assert a.hit_eq == 0
def test_subs_with_unfriendly_eq():
try:
import numpy as np
except:
return
else:
task = (np.sum, np.array([1, 2]))
assert (subs(task, (4, 5), 1) == task) is True
class MyException(Exception):
pass
class F():
def __eq__(self, other):
raise MyException()
task = F()
assert subs(task, 1, 2) is task
def test_subs_with_surprisingly_friendly_eq():
try:
import pandas as pd
except:
return
else:
df = pd.DataFrame()
assert subs(df, 'x', 1) is df
def test_quote():
literals = [[1, 2, 3], (add, 1, 2),
[1, [2, 3]], (add, 1, (add, 2, 3))]
for l in literals:
assert core.get({'x': quote(l)}, 'x') == l
def test_literal_serializable():
l = literal((add, 1, 2))
assert pickle.loads(pickle.dumps(l)).data == (add, 1, 2)
|
bsd-3-clause
|
jeicher/cobrapy
|
cobra/core/Metabolite.py
|
2
|
8717
|
from warnings import warn
import re
from six import iteritems
from .Species import Species
# Numbers are not required because of the |(?=[A-Z])? block. See the
# discussion in https://github.com/opencobra/cobrapy/issues/128 for
# more details.
element_re = re.compile("([A-Z][a-z]?)([0-9.]+[0-9.]?|(?=[A-Z])?)")
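# Sketch of what the regex yields (illustrative; not part of the original file):
#   element_re.findall("C6H12O6") -> [('C', '6'), ('H', '12'), ('O', '6')]
#   element_re.findall("NaCl")    -> [('Na', ''), ('Cl', '')]
# An empty count string is later interpreted as 1 by Metabolite.elements.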
class Metabolite(Species):
"""Metabolite is a class for holding information regarding
a metabolite in a cobra.Reaction object.
"""
def __init__(self, id=None, formula=None, name="",
charge=None, compartment=None):
"""
id: str
formula: str
        Chemical formula (e.g. H2O)
name: str
A human readable name.
compartment: str or None
Compartment of metabolite.
"""
Species.__init__(self, id, name)
self.formula = formula
# because in a Model a metabolite may participate in multiple Reactions
self.compartment = compartment
self.charge = charge
self._constraint_sense = 'E'
self._bound = 0.
@property
def elements(self):
tmp_formula = self.formula
if tmp_formula is None:
return {}
# necessary for some old pickles which use the deprecated
# Formula class
tmp_formula = str(self.formula)
        # commonly occurring characters in incorrectly constructed formulas
if "*" in tmp_formula:
warn("invalid character '*' found in formula '%s'" % self.formula)
tmp_formula = tmp_formula.replace("*", "")
if "(" in tmp_formula or ")" in tmp_formula:
warn("invalid formula (has parenthesis) in '%s'" % self.formula)
return None
composition = {}
parsed = element_re.findall(tmp_formula)
for (element, count) in parsed:
if count == '':
count = 1
else:
try:
count = float(count)
int_count = int(count)
if count == int_count:
count = int_count
else:
warn("%s is not an integer (in formula %s)" %
(count, self.formula))
except ValueError:
warn("failed to parse %s (in formula %s)" %
(count, self.formula))
return None
if element in composition:
composition[element] += count
else:
composition[element] = count
return composition
@elements.setter
def elements(self, elements_dict):
def stringify(element, number):
return element if number == 1 else element + str(number)
self.formula = ''.join(stringify(e, n) for e, n in
sorted(iteritems(elements_dict)))
@property
def formula_weight(self):
"""Calculate the formula weight"""
try:
return sum([count * elements_and_molecular_weights[element]
for element, count in self.elements.items()])
except KeyError as e:
warn("The element %s does not appear in the peridic table" % e)
@property
def y(self):
"""The shadow price for the metabolite in the most recent solution
Shadow prices are computed from the dual values of the bounds in
the solution.
"""
try:
return self._model.solution.y_dict[self.id]
except Exception as e:
if self._model is None:
raise Exception("not part of a model")
if not hasattr(self._model, "solution") or \
self._model.solution is None or \
self._model.solution.status == "NA":
raise Exception("model has not been solved")
if self._model.solution.status != "optimal":
raise Exception("model solution was not optimal")
raise e # Not sure what the exact problem was
def remove_from_model(self, method='subtractive', **kwargs):
"""Removes the association from self.model
method: 'subtractive' or 'destructive'.
If 'subtractive' then the metabolite is removed from all
associated reactions. If 'destructive' then all associated
reactions are removed from the Model.
"""
# why is model being taken in as a parameter? This plays
# back to the question of allowing a Metabolite to be associated
# with multiple Models
if "model" in kwargs:
warn("model argument deprecated")
self._model.metabolites.remove(self)
self._model = None
if method.lower() == 'subtractive':
for the_reaction in list(self._reaction):
the_coefficient = the_reaction._metabolites[self]
the_reaction.subtract_metabolites({self: the_coefficient})
elif method.lower() == 'destructive':
for x in self._reaction:
x.remove_from_model()
else:
raise Exception(method + " is not 'subtractive' or 'destructive'")
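    # Usage sketch (hypothetical identifiers): remove a metabolite from all of
    # its reactions while keeping the reactions themselves in the model:
    #   model.metabolites.get_by_id("h2o_c").remove_from_model(method="subtractive")
    # With method="destructive", every reaction involving it would be removed too.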
def summary(self, **kwargs):
"""Print a summary of the reactions which produce and consume this
        metabolite. This method requires that the model this metabolite is
        part of has been solved.
threshold: float
a value below which to ignore reaction fluxes
fva: float (0->1), or None
Whether or not to include flux variability analysis in the output.
If given, fva should be a float between 0 and 1, representing the
fraction of the optimum objective to be searched.
floatfmt: string
format method for floats, passed to tabulate. Default is '.3g'.
"""
try:
from ..flux_analysis.summary import metabolite_summary
return metabolite_summary(self, **kwargs)
except ImportError:
warn('Summary methods require pandas/tabulate')
elements_and_molecular_weights = {
'H': 1.007940,
'He': 4.002602,
'Li': 6.941000,
'Be': 9.012182,
'B': 10.811000,
'C': 12.010700,
'N': 14.006700,
'O': 15.999400,
'F': 18.998403,
'Ne': 20.179700,
'Na': 22.989770,
'Mg': 24.305000,
'Al': 26.981538,
'Si': 28.085500,
'P': 30.973761,
'S': 32.065000,
'Cl': 35.453000,
'Ar': 39.948000,
'K': 39.098300,
'Ca': 40.078000,
'Sc': 44.955910,
'Ti': 47.867000,
'V': 50.941500,
'Cr': 51.996100,
'Mn': 54.938049,
'Fe': 55.845000,
'Co': 58.933200,
'Ni': 58.693400,
'Cu': 63.546000,
'Zn': 65.409000,
'Ga': 69.723000,
'Ge': 72.640000,
'As': 74.921600,
'Se': 78.960000,
'Br': 79.904000,
'Kr': 83.798000,
'Rb': 85.467800,
'Sr': 87.620000,
'Y': 88.905850,
'Zr': 91.224000,
'Nb': 92.906380,
'Mo': 95.940000,
'Tc': 98.000000,
'Ru': 101.070000,
'Rh': 102.905500,
'Pd': 106.420000,
'Ag': 107.868200,
'Cd': 112.411000,
'In': 114.818000,
'Sn': 118.710000,
'Sb': 121.760000,
'Te': 127.600000,
'I': 126.904470,
'Xe': 131.293000,
'Cs': 132.905450,
'Ba': 137.327000,
'La': 138.905500,
'Ce': 140.116000,
'Pr': 140.907650,
'Nd': 144.240000,
'Pm': 145.000000,
'Sm': 150.360000,
'Eu': 151.964000,
'Gd': 157.250000,
'Tb': 158.925340,
'Dy': 162.500000,
'Ho': 164.930320,
'Er': 167.259000,
'Tm': 168.934210,
'Yb': 173.040000,
'Lu': 174.967000,
'Hf': 178.490000,
'Ta': 180.947900,
'W': 183.840000,
'Re': 186.207000,
'Os': 190.230000,
'Ir': 192.217000,
'Pt': 195.078000,
'Au': 196.966550,
'Hg': 200.590000,
'Tl': 204.383300,
'Pb': 207.200000,
'Bi': 208.980380,
'Po': 209.000000,
'At': 210.000000,
'Rn': 222.000000,
'Fr': 223.000000,
'Ra': 226.000000,
'Ac': 227.000000,
'Th': 232.038100,
'Pa': 231.035880,
'U': 238.028910,
'Np': 237.000000,
'Pu': 244.000000,
'Am': 243.000000,
'Cm': 247.000000,
'Bk': 247.000000,
'Cf': 251.000000,
'Es': 252.000000,
'Fm': 257.000000,
'Md': 258.000000,
'No': 259.000000,
'Lr': 262.000000,
'Rf': 261.000000,
'Db': 262.000000,
'Sg': 266.000000,
'Bh': 264.000000,
'Hs': 277.000000,
'Mt': 268.000000,
'Ds': 281.000000,
'Rg': 272.000000,
'Cn': 285.000000,
'Uuq': 289.000000,
'Uuh': 292.000000
}
|
lgpl-2.1
|
kwohlfahrt/pywt
|
demo/plot_wavelets.py
|
8
|
2656
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Plot scaling and wavelet functions for db, sym, coif, bior and rbio families
import itertools
import matplotlib.pyplot as plt
import pywt
plot_data = [('db', (4, 3)),
('sym', (4, 3)),
('coif', (3, 2))]
for family, (rows, cols) in plot_data:
fig = plt.figure()
fig.subplots_adjust(hspace=0.2, wspace=0.2, bottom=.02, left=.06,
right=.97, top=.94)
colors = itertools.cycle('bgrcmyk')
wnames = pywt.wavelist(family)
i = iter(wnames)
for col in range(cols):
for row in range(rows):
try:
wavelet = pywt.Wavelet(next(i))
except StopIteration:
break
phi, psi, x = wavelet.wavefun(level=5)
color = next(colors)
ax = fig.add_subplot(rows, 2 * cols, 1 + 2 * (col + row * cols))
ax.set_title(wavelet.name + " phi")
ax.plot(x, phi, color)
ax.set_xlim(min(x), max(x))
ax = fig.add_subplot(rows, 2*cols, 1 + 2*(col + row*cols) + 1)
ax.set_title(wavelet.name + " psi")
ax.plot(x, psi, color)
ax.set_xlim(min(x), max(x))
for family, (rows, cols) in [('bior', (4, 3)), ('rbio', (4, 3))]:
fig = plt.figure()
fig.subplots_adjust(hspace=0.5, wspace=0.2, bottom=.02, left=.06,
right=.97, top=.94)
colors = itertools.cycle('bgrcmyk')
wnames = pywt.wavelist(family)
i = iter(wnames)
for col in range(cols):
for row in range(rows):
try:
wavelet = pywt.Wavelet(next(i))
except StopIteration:
break
phi, psi, phi_r, psi_r, x = wavelet.wavefun(level=5)
row *= 2
color = next(colors)
ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols))
ax.set_title(wavelet.name + " phi")
ax.plot(x, phi, color)
ax.set_xlim(min(x), max(x))
ax = fig.add_subplot(2*rows, 2*cols, 2*(1 + col + row*cols))
ax.set_title(wavelet.name + " psi")
ax.plot(x, psi, color)
ax.set_xlim(min(x), max(x))
row += 1
ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols))
ax.set_title(wavelet.name + " phi_r")
ax.plot(x, phi_r, color)
ax.set_xlim(min(x), max(x))
ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols) + 1)
ax.set_title(wavelet.name + " psi_r")
ax.plot(x, psi_r, color)
ax.set_xlim(min(x), max(x))
plt.show()
|
mit
|
sjtudesigner/NeuralSight
|
model/activation_model.py
|
1
|
1315
|
from theano import function
from scipy.misc import imread, imresize
import numpy as np
import matplotlib.pyplot as plt
import os
from keras import backend as K
image_path = "./static/upload/"
def compute_activation(model, layer_id, image, out_path, size=(224, 224)):
if os.path.exists(out_path):
return len(os.listdir(out_path))
layer = model.layers[layer_id]
f = function([model.input, K.learning_phase()], layer.output, on_unused_input='ignore')
# hopefully this is a colored image
im = imread(image_path + image, mode='RGB') / 255.
print "im:", im.shape
imsize = model.input_shape[2:4] if model.input_shape[2] and model.input_shape[3] else size
im = imresize(im, imsize)
if len(im.shape) == 3:
im = im.transpose(2, 0, 1)[np.newaxis, :]
else:
im = im[np.newaxis, np.newaxis, :]
result = f(im, 0.)[0]
os.mkdir(out_path)
if len(result.shape) == 3:
for k in range(result.shape[0]):
plt.imsave(out_path + str(k + 1) + ".png", result[k])
return result.shape[0]
elif len(result.shape) == 1:
# histogram
fig = plt.figure(figsize=(20, 10))
plt.xlim([0, result.shape[0]])
plt.bar(np.arange(result.shape[0]), result, width=1)
fig.savefig(out_path + "1.png")
return 1
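# Usage sketch (hypothetical names; `model` is any Keras model built with a
# channels-first input, and the image/path strings below are made up):
#   n_maps = compute_activation(model, layer_id=3, image="cat.jpg",
#                               out_path="./static/activations/cat_layer3/")
# For a convolutional layer this saves one PNG per feature map and returns the
# count; for a 1-D layer output it saves a single histogram image and returns 1.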
|
mit
|
AlexanderFabisch/scikit-learn
|
examples/linear_model/plot_polynomial_interpolation.py
|
168
|
2088
|
#!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
|
bsd-3-clause
|
facemelters/data-science
|
fbVideoStats.py
|
1
|
4544
|
import pandas as pd
import numpy as np
import requests
import json
import gspread
import datetime
from time import sleep
from oauth2client.client import SignedJwtAssertionCredentials
#Facebook Login and Parameter Setting via Command Line
json_fb_key = json.load(open('./Credentials/fb_api_key.json'))
apikey = json_fb_key['credentials']['apikey'].encode('ascii','ignore')
pageID = str(raw_input('Page ID? '))
num_posts = int(raw_input('How many posts would you like to retrieve? '))
#Google Spreadsheets Login
json_key = json.load(open('./Credentials/'+ pageID + ' Update Client Secret.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
gc = gspread.authorize(credentials)
sheet_key = json.load(open('./Credentials/Facebook Sheet ID.json'))
sheet_key = sheet_key[pageID]
ws1 = gc.open_by_key(sheet_key).worksheet('Video')
def get_video_feed(pageID,num_posts):
pages = num_posts // 25
holder = []
counter = 0
endpoint = 'https://graph.facebook.com/v2.7/'+pageID+'/videos?fields=description,id,length,live_status,permalink_url,published,title,updated_time&access_token='+apikey
while counter <= pages:
response = requests.get(endpoint)
fb_data = response.json()
for item in range(len(fb_data['data'])):
holder.append(fb_data['data'][item])
try:
endpoint = fb_data['paging']['next']
counter += 1
except KeyError:
break
df = pd.DataFrame.from_dict(data=holder,orient='columns')
return df
def get_video_insights():
df = get_video_feed(pageID,num_posts)
df_stats = []
stories = 'total_video_stories_by_action_type'
attributes = ['total_video_impressions_unique','total_video_views_unique','total_video_30s_views_unique','total_video_complete_views_unique']
print df['id']
for item in df['id']:
endpoint = 'https://graph.facebook.com/v2.7/'+item+'/video_insights?access_token='+apikey
response = requests.get(endpoint)
data = response.json()
#Loop through JSON returned
tmp_dict = {}
for item in range(len(data['data'])):
#We only want a few features (defined above)
for attribute in attributes:
if data['data'][item]['name'] == attribute:
tmp_dict[attribute] = data['data'][item]['values'][0]['value']
elif data['data'][item]['name'] == stories:
tmp_dict['Shares'] = data['data'][item]['values'][0]['value']['share']
tmp_dict['Likes'] = data['data'][item]['values'][0]['value']['like']
tmp_dict['Comments'] = data['data'][item]['values'][0]['value']['comment']
df_stats.append(tmp_dict)
df_stats = pd.DataFrame.from_dict(data=df_stats,orient='columns')
df = df.join(df_stats)
return df
def numberToLetters(q):
q = q - 1
result = ''
while q >= 0:
remain = q % 26
result = chr(remain+65) + result;
q = q//26 - 1
return result
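# Quick check of numberToLetters (illustrative): 1 -> 'A', 26 -> 'Z', 27 -> 'AA';
# i.e. it converts a 1-based column index to a spreadsheet-style column label.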
def main():
df = get_video_insights()
df.updated_time = df.updated_time.apply(lambda x: x.split('T')[0])
# Let's send the column names of dataframe to Google
columns = df.columns.values.tolist()
#Login to Google Spreadsheets (Doing this inside func b/c of timeout)
gc = gspread.authorize(credentials)
ws1 = gc.open_by_key(sheet_key).worksheet('Video')
# selection of the range that will be updated
cell_list = ws1.range('A1:'+numberToLetters(len(columns))+'1')
# modifying the values in the range
for cell in cell_list:
val = columns[cell.col-1]
if type(val) is str:
val = val.decode('utf-8')
cell.value = val
# update in batch
ws1.update_cells(cell_list)
#Now let's send the dataframe via the apikey
# number of lines and columns
num_lines, num_columns = df.shape
# selection of the range that will be updated
cell_list = ws1.range('A2:'+numberToLetters(num_columns)+str(num_lines+1))
# modifying the values in the range
for cell in cell_list:
val = df.iloc[cell.row-2,cell.col-1]
if type(val) is str:
val = val.decode('utf-8')
cell.value = val
# update in batch
ws1.update_cells(cell_list)
if __name__ == '__main__':
main()
#Take POST_ID of any video post
#/v2.7/{POST_ID}/?fields=object_id gives us the object_id
#v2.7/{OBJECT_ID}/video_insights
|
gpl-2.0
|
Akshay0724/scikit-learn
|
sklearn/ensemble/forest.py
|
11
|
67127
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
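# Worked example for the two helpers above (illustrative): with n_samples=5 and
# a bootstrap draw of indices [0, 0, 2, 3, 3], the sample counts are
# [2, 0, 1, 2, 0], so the unsampled (out-of-bag) indices are [1, 4].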
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
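    # Sketch of how the two return values fit together (illustrative; `est_idx`
    # is a hypothetical loop variable):
    #   indicator, n_nodes_ptr = forest.decision_path(X)
    #   for est_idx in range(len(forest.estimators_)):
    #       per_tree = indicator[:, n_nodes_ptr[est_idx]:n_nodes_ptr[est_idx + 1]]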
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, unlike
# [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
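# Hedged usage sketch (illustrative, not in the original source): `apply`
# returns one leaf index per (sample, tree) and `decision_path` returns a
# sparse node-indicator matrix plus per-tree node offsets, as documented
# above. The names `forest`, `X_train`, `y_train` and `X_test` are
# placeholders for a fitted concrete subclass and user data.
#
#   from sklearn.ensemble import RandomForestClassifier
#   forest = RandomForestClassifier(n_estimators=10).fit(X_train, y_train)
#   leaves = forest.apply(X_test)                     # shape (n_samples, n_estimators)
#   indicator, n_nodes_ptr = forest.decision_path(X_test)
#   # nodes visited by sample 0 in tree i: indicator[0, n_nodes_ptr[i]:n_nodes_ptr[i + 1]]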
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
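# Hedged sketch (illustrative, not in the original source): for a fitted
# classifier `clf`, `predict` is the argmax of the averaged per-tree
# probabilities returned by `predict_proba`, exactly as implemented above.
# `X_test` is a placeholder.
#
#   import numpy as np
#   proba = clf.predict_proba(X_test)                      # mean over trees
#   labels = clf.classes_.take(np.argmax(proba, axis=1), axis=0)
#   assert np.array_equal(labels, clf.predict(X_test))     # single-output case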
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
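# Hedged usage sketch (illustrative): with `oob_score=True` and
# `bootstrap=True`, each training sample is predicted only by trees that did
# not see it, producing `oob_prediction_` and an R^2-based `oob_score_`.
# Variable names are placeholders.
#
#   from sklearn.ensemble import RandomForestRegressor
#   reg = RandomForestRegressor(n_estimators=50, oob_score=True).fit(X_train, y_train)
#   reg.oob_prediction_    # out-of-bag prediction per training sample
#   reg.oob_score_         # R^2 of those out-of-bag predictions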
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
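# Hedged usage sketch (illustrative, not part of the original source): a
# minimal classification run with out-of-bag scoring; the dataset helper is
# just one convenient way to get toy data.
#
#   from sklearn.datasets import load_iris
#   from sklearn.ensemble import RandomForestClassifier
#   X, y = load_iris(return_X_y=True)
#   clf = RandomForestClassifier(n_estimators=100, oob_score=True, random_state=0).fit(X, y)
#   clf.oob_score_              # accuracy estimated from out-of-bag samples
#   clf.feature_importances_    # normalized impurity-based importances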
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
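# Hedged usage sketch (illustrative): regression with the defaults defined
# above; `X_train`, `y_train` and `X_test` are placeholders.
#
#   from sklearn.ensemble import RandomForestRegressor
#   reg = RandomForestRegressor(n_estimators=100, random_state=0).fit(X_train, y_train)
#   y_pred = reg.predict(X_test)    # mean of the per-tree predictions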
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
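# Hedged usage sketch (illustrative): extra-trees expose the same interface
# as RandomForestClassifier but draw split thresholds at random and, per the
# default above, do not bootstrap (bootstrap=False). Data names are
# placeholders.
#
#   from sklearn.ensemble import ExtraTreesClassifier
#   clf = ExtraTreesClassifier(n_estimators=100, random_state=0).fit(X_train, y_train)
#   clf.predict_proba(X_test)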
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
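# Hedged usage sketch (illustrative, not part of the original source): the
# embedding one-hot encodes the leaf reached in each tree, so every row of
# the transformed matrix has exactly n_estimators non-zero entries.
#
#   import numpy as np
#   from sklearn.ensemble import RandomTreesEmbedding
#   X = np.random.RandomState(0).rand(100, 2)
#   embedder = RandomTreesEmbedding(n_estimators=10, random_state=0)
#   X_sparse = embedder.fit_transform(X)       # scipy.sparse CSR, shape (100, n_out)
#   assert (X_sparse.sum(axis=1) == 10).all()  # one active leaf per tree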
|
bsd-3-clause
|
Christoph/tag-connect
|
brushing/create_inliers.py
|
1
|
1329
|
import pandas as pd
import numpy as np
import codecs, json
data = []
for i in range(0, 1000):
element = {}
temp = []
p1 = [10, 20]
p2 = [50, 60]
p3 = [80, 70]
temp.append({"t": 0, "out": i})
temp.append({"t": 1, "out": i})
temp.append({"t": 2, "out": i})
element["data"] = temp
if i < 200:
x = p1[0] + np.random.randint(-7, 10)
y = p1[1] + np.random.randint(-10, 7)
elif i >= 200 and i < 400:
x = p2[0] - 10 + np.random.randint(-8, 2)
y = p2[1] + np.random.randint(-20, 12)
elif i >= 400 and i < 600:
x = p2[0] + np.random.randint(-8, 8)
y = p2[1] + np.random.randint(-20, 12)
elif i >= 600 and i < 800:
x = p2[0] + 10 + np.random.randint(-2, 10)
y = p2[1] + np.random.randint(-20, 12)
elif i >= 800:
x = p3[0] + np.random.randint(-5, 5)
y = p3[1] + np.random.randint(-15, 15)
if x >= 40 and x < 50 and y < 60 and y > 50:
x += int(2 * np.abs(x - 50))
elif x > 50 and x <= 60 and y < 60 and y > 50:
x -= int(2 * np.abs(x - 50))
element["params"] = {
"x": x,
"y": y
}
data.append(element)
data
file_path = "inlier.json"
json.dump(data, codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4)
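# Hedged companion sketch (not in the original script): reading the dumped
# file back, mirroring the codecs/json calls above; `file_path` as defined
# above.
#
#   loaded = json.load(codecs.open(file_path, 'r', encoding='utf-8'))
#   len(loaded)            # 1000 elements
#   loaded[0]['params']    # {'x': ..., 'y': ...}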
|
mit
|
roofit-dev/parallel-roofit-scripts
|
rootbench_analysis/RooFitMP_analysis.py
|
1
|
15483
|
import glob
import json
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import egp.plot
from IPython.display import display
default_columns = ('bm_name', 'NumCPU', 'manual_time')
columns = {'BM_RooFit_BinnedMultiProcGradient': ('bm_name', 'bins', 'NumCPU', 'manual_time'),
'BM_RooFit_1DUnbinnedGaussianMultiProcessGradMinimizer': default_columns,
'BM_RooFit_NDUnbinnedGaussianMultiProcessGradMinimizer': ('bm_name', 'NumCPU', 'dims', 'manual_time'),
'BM_RooFit_MP_GradMinimizer_workspace_file': default_columns,
'BM_RooFit_MP_GradMinimizer_workspace_file_noOptConst': default_columns,
'BM_RooFit_MP_GradMinimizer_workspace_file_NumCPUInConfigFile': ('bm_name', 'manual_time'),
'BM_RooFit_RooMinimizer_workspace_file': ('bm_name', 'manual_time'),
'BM_RooFit_RooMinimizer_workspace_file_noOptConst': ('bm_name', 'manual_time'),
'BM_RooFit_RooMinimizer_workspace_file_NumCPUInConfigFile': ('bm_name', 'manual_time')
}
# Functions for getting Google Benchmark results from json files
def load_result(it=-1, **kwargs):
json_files = sorted(glob.glob('/Users/pbos/projects/apcocsm/roofit-dev/rootbench/cmake-build-debug/root/roofit/roofit/RoofitMultiproc_*.json'))
return load_result_file(json_files[it], **kwargs)
def sort_raw_benchmarks_by_named_type(raw_dict):
benchmarks = defaultdict(list)
benchmark_number = 0
for bm in raw_dict['benchmarks']:
name = bm['name'].split('/')[0]
# order of benchmarks is important for merging with less structured sources (stdout) later on
# but only for real benchmarks, not the mean/median/stddev entries in the json file
if bm['name'].split('_')[-1] not in ('mean', 'median', 'stddev'):
bm['benchmark_number'] = benchmark_number
benchmark_number += 1
benchmarks[name].append(bm)
return benchmarks
def add_ideal_timings(df, group_ideal_by=None, time_col='real_time',
return_ideal=False, ideal_stat='min'):
if group_ideal_by is not None:
min_single_core = df[df['NumCPU'] == 1].groupby(group_ideal_by)[time_col].agg(ideal_stat)
df_ideal = min_single_core.to_frame(time_col)
df_ideal.reset_index(level=0, inplace=True)
else:
min_single_core = df[df['NumCPU'] == 1][time_col].agg(ideal_stat)
df_ideal = pd.DataFrame({time_col: [min_single_core]})
numCPU = np.unique(df['NumCPU'])
numCPU.sort()
df_numCPU = pd.Series(numCPU, name='NumCPU').to_frame()
# necessary for doing a cross merge (cartesian product):
df_numCPU['key'] = 1
df_ideal['key'] = 1
df_ideal = df_ideal.merge(df_numCPU, on='key', how='outer').drop('key', axis=1)
df_ideal[time_col] /= df_ideal['NumCPU']
df_ideal['real or ideal'] = "ideal"
df = pd.concat([df, df_ideal], sort=False)
if return_ideal:
return df_ideal
else:
return df
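# Hedged example (illustrative): the "ideal" rows divide the fastest
# single-core time by NumCPU, i.e. perfect linear scaling.
#
#   df = pd.DataFrame({'NumCPU': [1, 1, 2, 4],
#                      'real_time': [10.0, 12.0, 7.0, 4.0],
#                      'real or ideal': 'real'})
#   add_ideal_timings(df)   # appends ideal rows with real_time 10.0, 5.0, 2.5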
def df_from_raw_named_bmlist(bmlist, name, add_ideal, group_ideal_by=None):
df = pd.DataFrame(bmlist)
df_names = pd.DataFrame(df.name.str.slice(start=len("BM_RooFit_")).str.split('/').values.tolist(), columns=columns[name])
for c in columns[name][1:-1]:
df_names[c] = pd.to_numeric(df_names[c])
df = df.join(df_names)
# Drop mean, median and std results, only keep normal timings (we do stats ourselves):
df = df[df['manual_time'] == 'manual_time']
df = df.drop(['name', 'manual_time', 'cpu_time', 'iterations', 'time_unit'], axis=1)
# for single core runs, add NumCPU column:
if 'NumCPU' not in df.columns:
df['NumCPU'] = 1
if add_ideal:
print("Not plotting ideal, since this was only a single core run")
add_ideal = False
df["real or ideal"] = "real"
if add_ideal:
df = add_ideal_timings(df, group_ideal_by)
df = df.astype(dtype={'benchmark_number': 'Int64'})
return df
def hue_from_name(name):
if name == 'BM_RooFit_BinnedMultiProcGradient':
hue = 'bins'
elif name == 'BM_RooFit_NDUnbinnedGaussianMultiProcessGradMinimizer':
hue = 'dims'
else:
hue = None
return hue
def plot_result_dfs(dfs, show_dfs=False, figscale=6, match_y_axes=False):
fig, ax = egp.plot.subplots(len(dfs), wrap=3,
figsize=(len(dfs) * 1.1 * figscale, figscale),
squeeze=False)
ax = ax.flatten()
for ix, (name, df) in enumerate(dfs.items()):
if show_dfs:
display(df)
ax[ix].set_title(name)
sns.lineplot(data=df, x='NumCPU', y='real_time',
hue=hue_from_name(name), style="real or ideal",
markers=True, err_style="bars", legend='full',
ax=ax[ix])
if match_y_axes:
ymin, ymax = ax[0].get_ylim()
for axi in ax:
ymin = min(ymin, axi.get_ylim()[0])
ymax = max(ymax, axi.get_ylim()[1])
for axi in ax:
axi.set_ylim((ymin, ymax))
def load_result_file(fn, show_dfs=False, figscale=6, plot_ideal=True,
match_y_axes=False, plot_results=True):
dfs = {}
with open(fn) as fh:
raw = json.load(fh)
print(raw['context'])
benchmarks = sort_raw_benchmarks_by_named_type(raw)
for ix, (name, bmlist) in enumerate(benchmarks.items()):
dfs[name] = df_from_raw_named_bmlist(bmlist, name, plot_ideal, hue_from_name(name))
if plot_results:
plot_result_dfs(dfs, show_dfs=show_dfs, figscale=figscale, match_y_axes=match_y_axes)
return dfs
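# Hedged usage sketch (illustrative; the file name is a placeholder):
#
#   dfs = load_result_file('RoofitMultiproc_example.json', plot_results=False)
#   list(dfs.keys())            # one DataFrame per benchmark name
#   dfs[next(iter(dfs))].head()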
# Functions for extracting more fine-grained info from stdout prints
def extract_split_timing_info(fn):
"""
Group lines by benchmark iteration, starting from migrad until
after the forks have been terminated.
"""
with open(fn, 'r') as fh:
lines = fh.read().splitlines()
bm_iterations = []
start_indices = []
end_indices = []
for ix, line in enumerate(lines):
if 'start migrad' in line:
if lines[ix - 1] == 'start migrad': # sometimes 'start migrad' appears twice
start_indices.pop()
start_indices.append(ix)
elif line[:11] == 'terminate: ':
end_indices.append(ix)
if len(start_indices) != len(end_indices):
raise Exception("Number of start and end indices unequal!")
for ix in range(len(start_indices)):
bm_iterations.append(lines[start_indices[ix] + 1:end_indices[ix] + 1])
return bm_iterations
def group_timing_lines(bm_iteration_lines):
"""
Group lines (from one benchmark iteration) by gradient call,
specifying:
- Update time
- Gradient work time
- For all partial derivatives a sublist of all lines
Finally, the terminate time for the entire bm_iteration is also
returned (last line in the list).
"""
gradient_calls = []
start_indices = []
end_indices = []
for ix, line in enumerate(bm_iteration_lines[:-1]): # -1: leave out terminate line
if line[:9] == 'worker_id':
if bm_iteration_lines[ix - 1][:9] != 'worker_id': # only use the first of these
start_indices.append(ix)
elif line[:12] == 'update_state':
end_indices.append(ix)
for ix in range(len(start_indices)):
gradient_calls.append({
'gradient_total': bm_iteration_lines[end_indices[ix]],
'partial_derivatives': bm_iteration_lines[start_indices[ix]:end_indices[ix]]
})
try:
terminate_line = bm_iteration_lines[-1]
except IndexError:
terminate_line = None
return gradient_calls, terminate_line
def build_df_split_timing_run(timing_grouped_lines_list, terminate_line):
data = {'time [s]': [], 'timing_type': [], 'worker_id': [], 'task': []}
for gradient_call_timings in timing_grouped_lines_list:
words = gradient_call_timings['gradient_total'].split()
data['time [s]'].append(float(words[1][:-2]))
data['timing_type'].append('update state')
data['worker_id'].append(None)
data['task'].append(None)
data['time [s]'].append(float(words[4][:-1]))
data['timing_type'].append('gradient work')
data['worker_id'].append(None)
data['task'].append(None)
for partial_derivative_line in gradient_call_timings['partial_derivatives']:
words = partial_derivative_line.split()
data['worker_id'].append(words[1][:-1])
data['task'].append(words[3][:-1])
data['time [s]'].append(float(words[7][:-1]))
data['timing_type'].append('partial derivative')
words = terminate_line.split()
data['time [s]'].append(float(words[1][:-1]))
data['timing_type'].append('terminate')
data['worker_id'].append(None)
data['task'].append(None)
return pd.DataFrame(data)
def build_dflist_split_timing_info(fn):
bm_iterations = extract_split_timing_info(fn)
dflist = []
for bm in bm_iterations:
grouped_lines, terminate_line = group_timing_lines(bm)
if terminate_line is not None:
dflist.append(build_df_split_timing_run(grouped_lines, terminate_line))
return dflist
def build_comb_df_split_timing_info(fn):
dflist = build_dflist_split_timing_info(fn)
ix = 0
for df in dflist:
df_pardiff = df[df["timing_type"] == "partial derivative"]
N_tasks = len(df_pardiff["task"].unique())
N_gradients = len(df_pardiff) // N_tasks
gradient_indices = np.hstack([i * np.ones(N_tasks, dtype='int') for i in range(N_gradients)])
df["gradient number"] = pd.Series(dtype='Int64')
df.loc[df["timing_type"] == "partial derivative", "gradient number"] = gradient_indices
df["benchmark_number"] = ix
ix += 1
return pd.concat(dflist)
def combine_split_total_timings(df_total_timings, df_split_timings,
calculate_rest=True, exclude_from_rest=[],
add_ideal=[]):
df_meta = df_total_timings.drop(['real_time', 'real or ideal'], axis=1).dropna().set_index('benchmark_number', drop=True)
df_all_timings = df_total_timings.rename(columns={'real_time': 'time [s]'})
df_all_timings['time [s]'] /= 1000 # convert to seconds
df_all_timings['timing_type'] = 'total'
df_split_sum = {}
for name, df in df_split_timings.items():
df_split_sum[name] = df.groupby('benchmark_number').sum().join(df_meta, on='benchmark_number').reset_index()
df_split_sum[name]['real or ideal'] = 'real'
if name in add_ideal:
df_split_sum[name] = add_ideal_timings(df_split_sum[name], time_col='time [s]')
df_split_sum[name]['timing_type'] = name
# note: sort sorts the *columns* if they are not aligned, nothing happens with the column data itself
df_all_timings = pd.concat([df_all_timings, ] + list(df_split_sum.values()), sort=True)
if calculate_rest:
rest_time = df_all_timings[(df_all_timings['timing_type'] == 'total') & (df_all_timings['real or ideal'] == 'real')].set_index('benchmark_number')['time [s]']
for name, df in df_split_sum.items():
if name not in exclude_from_rest:
rest_time = rest_time - df.set_index('benchmark_number')['time [s]']
df_rest_time = rest_time.to_frame().join(df_meta, on='benchmark_number').reset_index()
df_rest_time['timing_type'] = 'rest'
df_rest_time['real or ideal'] = 'real'
# note: sort sorts the *columns* if they are not aligned, nothing happens with the column data itself
df_all_timings = pd.concat([df_all_timings, df_rest_time], sort=True)
return df_all_timings
def combine_detailed_with_gbench_timings_by_name(df_gbench, df_detailed, timing_types={}, **kwargs):
detailed_selection = {}
if len(timing_types) == 0:
raise Exception("Please give some timing_types, otherwise this function is pointless.")
for name, timing_type in timing_types.items():
detailed_selection[name] = df_detailed[df_detailed['timing_type'] == timing_type].drop('timing_type', axis=1)
return combine_split_total_timings(df_gbench, detailed_selection, **kwargs)
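# Minimal usage sketch, assuming df_gbench holds the google-benchmark totals and
# df_detailed comes from build_comb_df_split_timing_info above (the dict values must
# match the 'timing_type' labels produced in build_df_split_timing_run):
#
#   df_combined = combine_detailed_with_gbench_timings_by_name(
#       df_gbench, df_detailed,
#       timing_types={'gradient work': 'gradient work',
#                     'update state': 'update state'})
#
# Each key becomes its own 'timing_type' next to the gbench 'total' rows, and with the
# default calculate_rest=True a 'rest' row holds the part of the total not covered above.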
# Functions for plotting detailed partial derivatives timing statistics
def plot_partial_derivative_per_worker(data, figsize=(16, 10)):
N_tasks = len(data['task'].unique())
colors = plt.cm.get_cmap('prism', N_tasks)
fig, ax = plt.subplots(2, 4, sharey=True, figsize=figsize)
ax = ax.flatten()
for ix_n, n in enumerate(data['NumCPU'].unique()):
data_n = data[data['NumCPU'] == n]
for w in data_n['worker_id'].unique():
data_n_w = data_n[data_n['worker_id'] == w]
prev_time = 0
for task in data_n_w['task'].unique():
time = data_n_w[data_n_w['task'] == task]['time [s]']
if any(time):
ax[ix_n].bar(w, time, bottom=prev_time, color=colors(int(task)),
linewidth=0.3,
edgecolor=(0.2, 0.2, 0.2)
)
prev_time += float(time)
def plot_partial_derivative_per_benchmark(data, figsize=(20, 10)):
N_tasks = len(data['task'].unique())
colors = plt.cm.get_cmap('prism', N_tasks)
fig, ax = plt.subplots(2, 5, sharey=True, figsize=figsize)
ax = ax.flatten()
for ix_b, b in enumerate(data['benchmark_number'].unique()):
data_b = data[data['benchmark_number'] == b]
for w in data_b['worker_id'].unique():
data_b_w = data_b[data_b['worker_id'] == w]
prev_time = 0
for task in data_b_w['task'].unique():
time = data_b_w[data_b_w['task'] == task]['time [s]']
if any(time):
ax[ix_b].bar(w, time, bottom=prev_time, color=colors(int(task)),
linewidth=0.3,
edgecolor=(0.2, 0.2, 0.2)
)
prev_time += float(time)
def plot_partial_derivative_per_gradient(data, figsize=(20, 1.61 * 20), wrap=8):
N_tasks = len(data['task'].unique())
colors = plt.cm.get_cmap('jet', N_tasks)
fig, ax = egp.plot.subplots(len(data['gradient number'].unique()), wrap=wrap,
sharey=True, figsize=figsize)
try:
ax = ax.flatten()
except AttributeError:
ax = [ax]
for b in data['benchmark_number'].unique():
data_b = data[data['benchmark_number'] == b]
for ix_g, g in enumerate(data_b['gradient number'].unique()):
data_b_g = data_b[data_b['gradient number'] == g]
for w in data_b_g['worker_id'].unique():
data_b_g_w = data_b_g[data_b_g['worker_id'] == w]
prev_time = 0
for task in data_b_g_w['task'].unique():
time = data_b_g_w[data_b_g_w['task'] == task]['time [s]']
if any(time):
ax[ix_g].bar(w, time, bottom=prev_time, color=colors(int(task)),
linewidth=0.3,
edgecolor=(0.2, 0.2, 0.2)
)
prev_time += float(time)
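# End-to-end sketch, assuming a log file name (hypothetical) and the parsing helpers above:
#
#   df_split = build_comb_df_split_timing_info("timing_output.log")
#   df_pd = df_split[df_split["timing_type"] == "partial derivative"]
#   plot_partial_derivative_per_gradient(df_pd)
#   plt.show()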
|
apache-2.0
|
reuk/waveguide
|
bin/siltanen2013/analysis.py
|
2
|
1872
|
#!/usr/local/bin/python
import argparse
import numpy as np
import pysndfile
import matplotlib
import matplotlib.pyplot as plt
import itertools
import os.path
SPEED_OF_SOUND = 340
def modal_analysis(fnames, max_frequency, room_dim=None):
plt.figure()
for fname in fnames:
sndfile = pysndfile.PySndfile(fname, 'r')
if sndfile.channels() != 1:
raise RuntimeError('please only load mono files')
n = sndfile.frames()
sr = sndfile.samplerate()
samples = sndfile.read_frames()
fft = np.abs(np.fft.rfft(samples))
freqs = np.fft.rfftfreq(n, d=1. / sr)
mask = freqs < max_frequency
fft = 20 * np.log10(fft[mask])
freqs = freqs[mask]
plt.plot(freqs, fft, label=os.path.basename(fname))
if room_dim is not None:
ranges = [[(x / i) ** 2 for x in range(10)] for i in room_dim]
all_frequencies = [(SPEED_OF_SOUND / 2) * np.sqrt(a + b + c)
for a, b, c in itertools.product(*ranges)]
filtered_frequencies = [i for i in all_frequencies if i < max_frequency]
for f in filtered_frequencies:
plt.axvline(f)
plt.legend()
plt.show()
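# Worked illustration of the mode formula implemented above: for a rectangular room,
#   f(nx, ny, nz) = (c / 2) * sqrt((nx/Lx)^2 + (ny/Ly)^2 + (nz/Lz)^2)
# so with c = 340 m/s and --room_dim 5 4 3 the first axial modes land at
#   f(1,0,0) = 170 / 5 = 34.0 Hz
#   f(0,1,0) = 170 / 4 = 42.5 Hz
#   f(0,0,1) = 170 / 3 ~ 56.7 Hz
# which is where the axvline markers appear (the room dimensions here are just an example).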
def main():
parser = argparse.ArgumentParser(
description='do modal analysis on some set of files')
parser.add_argument(
'fnames',
type=str,
nargs='+',
help='a list of files to analyse')
parser.add_argument(
'--room_dim',
type=float,
nargs=3,
help='the dimensions of the room w/h/l')
parser.add_argument(
'--max_frequency',
type=float,
default=150,  # no nargs here, so a user-supplied value stays a scalar like the default
help='analyse up to this frequency')
args = parser.parse_args()
modal_analysis(args.fnames, args.max_frequency, args.room_dim)
if __name__ == '__main__':
main()
|
gpl-2.0
|
JackKelly/neuralnilm_prototype
|
scripts/e177.py
|
2
|
6394
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
above_thresh_sq_error = sq_error[(t > THRESHOLD).nonzero()]
below_thresh_sq_error = sq_error[(t <= THRESHOLD).nonzero()]
above_thresh_mean = above_thresh_sq_error.mean()
below_thresh_mean = below_thresh_sq_error.mean()
above_thresh_mean = ifelse(T.isnan(above_thresh_mean), 0.0, above_thresh_mean)
below_thresh_mean = ifelse(T.isnan(below_thresh_mean), 0.0, below_thresh_mean)
return (above_thresh_mean + below_thresh_mean) / 2.0
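# Toy illustration of the cost above (numbers invented for clarity): with THRESHOLD = 0,
# targets t = [0, 0, 2, 4] and outputs x = [0, 1, 2, 2],
#   above-threshold squared errors (t > 0):  (2-2)^2 = 0, (2-4)^2 = 4  -> mean 2.0
#   below-threshold squared errors (t <= 0): (0-0)^2 = 0, (1-0)^2 = 1  -> mean 0.5
#   scaled_cost = (2.0 + 0.5) / 2 = 1.25
# The ifelse calls swap a NaN mean for 0.0 whenever one of the two groups is empty,
# which is what lets this cost avoid NaNs (see experiment note 177 above).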
def exp_a(name):
# LR of 0.1 didn't NaN but didn't learn well.
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=None,#[200, 100, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
include_diff=True
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=250,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=.0001, clip_range=(-1, 1)),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Uniform(25)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
|
mit
|
fabianp/scikit-learn
|
examples/ensemble/plot_ensemble_oob.py
|
259
|
3265
|
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
|
bsd-3-clause
|
wkfwkf/statsmodels
|
statsmodels/graphics/tests/test_correlation.py
|
31
|
1112
|
import numpy as np
from numpy.testing import dec
from statsmodels.graphics.correlation import plot_corr, plot_corr_grid
from statsmodels.datasets import randhie
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_plot_corr():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr(corr_matrix, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, normcolor=True, title='', cmap='jet')
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_corr_grid():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr_grid([corr_matrix] * 2, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 5, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 3, normcolor=True, titles='', cmap='jet')
plt.close(fig)
|
bsd-3-clause
|
JavierGarciaD/AlgoRepo
|
EChanBook2/example_2_6.py
|
1
|
2855
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import statsmodels.api as sm  # sm.OLS is used below; assumed to be statsmodels (may also be re-exported by functions)
from functions import *
""" This is an implementation of the example 2.6 from Ernest Chan's
book ALGORITHMIC TRADING - Winning Strategies and Their Rationale
"""
if __name__ == "__main__":
#import data from CSV file
root_path = '/Users/Javi/Documents/MarketData/'
# the paths
# MAC: '/Users/Javi/Documents/MarketData/'
# WIN: 'C:/Users/javgar119/Documents/Python/Data'
filename_x = 'EWC_EWA_daily.csv'
#filename_y = 'ECOPETROL_ADR.csv'
full_path_x = root_path + filename_x
#full_path_y = root_path + filename_y
data = pd.read_csv(full_path_x, index_col='Date')
#create a series with the data range asked
#start_date = '2009-10-02'
#end_date = '2011-12-30'
#data = subset_dataframe(data, start_date, end_date)
y = data['EWC']
x = data['EWA']
y_ticket = 'EWC'
x_ticket = 'EWA'
# z = data['IGE']
k = polyfit(x,y,1)
xx = linspace(min(x),max(x),1000)
yy = polyval(k,xx)
lookback = 100
modelo2 = pd.ols(y=y, x=x, window_type='rolling', window=lookback)
data = data[lookback-1:]
betas = modelo2.beta
#calculate the number of units for the strategy
numunits = subtract(data[x_ticket], multiply(betas['x'], data[y_ticket]))
model = sm.OLS(y, x)
results = model.fit()
#print(results.params)
# cointegration test
resultsCOIN = cointegration_test(x,y)
print('****** COINTEGRATION TEST RESULTS FOR {} & {} *****'.format(x_ticket, y_ticket))
print('cointegration t-stat: {}'.format(round(resultsCOIN[0], 4)))
print('p-value: {}'.format(round(resultsCOIN[1], 4)))
print('usedlag: {}'.format(round(resultsCOIN[2], 4)))
print('nobs: {}'.format(round(resultsCOIN[3],4)))
print('critical values: {}'.format(resultsCOIN[4]))
#*************************************************
# plotting the chart
#*************************************************
#plot of numunits
fig = plt.figure()
ax = fig.add_subplot(311)
ax.plot(data)
ax.set_title(x_ticket + ' & ' + y_ticket)
ax.set_xlabel('Data points')
ax.set_ylabel('Price')
#ax.text(1000, -12, 'Seems like mean reverting')
#plot of datapoints
ax = fig.add_subplot(312)
ax.plot(numunits)
ax.set_title(x_ticket + ' - (HedgeRatio * ' + y_ticket + ')')
ax.set_xlabel(x_ticket)
ax.set_ylabel(y_ticket)
#plot of datapoints
ax = fig.add_subplot(313)
ax.plot(x,y,'o')
ax.plot(xx,yy,'r')
ax.set_title(x_ticket + ' vs. ' + y_ticket)
ax.set_xlabel(x_ticket)
ax.set_ylabel(y_ticket)
plt.show()
|
apache-2.0
|
MrCodeYu/spark
|
python/pyspark/sql/dataframe.py
|
3
|
58143
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
from pyspark import copy_func, since
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import *
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SQLContext`::
people = sqlContext.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SQLContext
people = sqlContext.read.parquet("...")
department = sqlContext.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this RDD as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SQLContext`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Experimental.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Experimental
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: Whether to truncate long strings and align cells right.
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
print(self._jdf.showString(n, truncate))
def __repr__(self):
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.collectToPython()
return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.toPythonIterator()
return _load_from_socket(port, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
""" Persists with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""Sets the storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified, it defaults to (C{MEMORY_ONLY}).
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
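# Usage sketch (hypothetical choice of level): a non-default storage level can be passed
# directly, e.g. df.persist(StorageLevel.MEMORY_AND_DISK); StorageLevel is imported at the
# top of this module.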
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions.
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
``numPartitions`` can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement, fraction, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
>>> df.sample(False, 0.5, 42).count()
2
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd = self._jdf.sample(withReplacement, fraction, long(seed))
return DataFrame(rdd, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
"""
if not isinstance(col, str):
raise ValueError("col must be a string, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default 'inner'.
One of `inner`, `outer`, `left_outer`, `right_outer`, `leftsemi`.
The following performs a full outer join between ``df1`` and ``df2``.
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
[Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is None or len(on) == 0:
jdf = self._jdf.join(other._jdf)
elif isinstance(on[0], basestring):
if how is None:
jdf = self._jdf.join(other._jdf, self._jseq(on), "inner")
else:
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, self._jseq(on), how)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
if len(on) > 1:
on = reduce(lambda x, y: x.__and__(y), on)
else:
on = on[0]
if how is None:
jdf = self._jdf.join(other._jdf, on._jc, "inner")
else:
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on._jc, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes statistics for numeric columns.
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical columns.
.. note:: This function is meant for exploratory data analysis, as we make no \
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe().show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe(['age', 'name']).show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
Note that this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this
frame and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by a distinct.
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
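# Usage sketch (hypothetical data) for the UNION ALL semantics described above:
#   df.union(df).count()             # duplicates kept, i.e. 2 * df.count()
#   df.union(df).distinct().count()  # SQL-style set union, duplicates removed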
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this
frame and another frame.
.. note:: Deprecated in 2.0, use union instead.
"""
return self.union(other)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
If specified, drop rows that have less than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, dict)):
raise ValueError("value should be a float, int, long, string, or dict")
if isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
:param to_replace: int, long, float, string, or list.
Value to be replaced.
If the value is a dict, then `value` is ignored and `to_replace` must be a
mapping from column name (string) to replacement value. The value to be
replaced must be an int, long, float, or string.
:param value: int, long, float, string, or list.
Value to use to replace holes.
The replacement value must be an int, long, float, or string. If `value` is a
list or tuple, `value` should be of the same length with `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if not isinstance(to_replace, (float, int, long, basestring, list, tuple, dict)):
raise ValueError(
"to_replace should be a float, int, long, string, list, tuple, or dict")
if not isinstance(value, (float, int, long, basestring, list, tuple)):
raise ValueError("value should be a float, int, long, string, list, or tuple")
rep_dict = dict()
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(to_replace, tuple):
to_replace = list(to_replace)
if isinstance(value, tuple):
value = list(value)
if isinstance(to_replace, list) and isinstance(value, list):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length")
rep_dict = dict(zip(to_replace, value))
elif isinstance(to_replace, list) and isinstance(value, (float, int, long, basestring)):
rep_dict = dict([(tr, value) for tr in to_replace])
elif isinstance(to_replace, dict):
rep_dict = to_replace
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
elif isinstance(subset, basestring):
subset = [subset]
if not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of a numerical column of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
presented in [[http://dx.doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
:param col: the name of the numerical column
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities
"""
if not isinstance(col, str):
raise ValueError("col should be a string.")
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
return list(jaq)
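# Usage sketch (hypothetical column and tolerance) for the bound described above: asking
# for the median with a 5% relative error,
#   df.approxQuantile("age", [0.5], 0.05)
# returns a one-element list [x] whose exact rank lies between floor(0.45 * N) and
# ceil(0.55 * N) over the N rows.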
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataFrame as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the DataFrame.
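        A minimal usage sketch (skipped in doctests; row order is not
        deterministic):
        >>> df.crosstab('age', 'name').collect()  # doctest: +SKIP
        [Row(age_name=u'2', Alice=1, Bob=0), Row(age_name=u'5', Alice=0, Bob=1)]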
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no \
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
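        A minimal usage sketch (skipped in doctests; the result may contain
        false positives):
        >>> df.freqItems(['age'], support=0.5).collect()  # doctest: +SKIP
        [Row(age_freqItems=[5, 2])]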
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
        :param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, col):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param col: a string name of the column to drop, or a
:class:`Column` to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
"""
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
"""Returns a new class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def toPandas(self):
"""Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
        Note that this method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
This is only available if Pandas is installed and available.
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
import pandas as pd
return pd.DataFrame.from_records(self.collect(), columns=self.columns)
##########################################################################################
# Pandas compatibility
##########################################################################################
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
rohanp/scikit-learn
|
sklearn/feature_selection/rfe.py
|
5
|
15669
|
# Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv.split(X, y)):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to get_n_splits(X, y) - 1
# here, the scores are normalized by get_n_splits(X, y)
self.grid_scores_ = scores / cv.get_n_splits(X, y)
return self
|
bsd-3-clause
|
trankmichael/scikit-learn
|
examples/plot_kernel_ridge_regression.py
|
230
|
6222
|
"""
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
MrSnede/BalancingWheelRobot
|
mplwidget.py
|
1
|
4357
|
#!/usr/bin/env python3
# http://stackoverflow.com/questions/12459811/how-to-embed-matplotib-in-pyqt-for-dummies
# Python Qt4 bindings for GUI objects
from PyQt4 import QtGui
# import the Qt4Agg FigureCanvas object, that binds Figure to
# Qt4Agg backend. It also inherits from QWidget
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from collections import deque
from matplotlib.figure import Figure
import matplotlib.animation as animation
#import matplotlib.pyplot as plt
class PlotValueBuffer:
    def __init__(self, maxLen):  # todo: maxLen is set in several places
        self.ax = deque([0.0]*maxLen)  # todo: better names for ax + ay
self.ay = deque([0.0]*maxLen)
self.maxLen = maxLen
def addToBuf(self, buf, val):
if len(buf) < self.maxLen:
buf.append(val)
else:
buf.pop()
buf.appendleft(val)
print(buf)
def add(self, datastring):
"""wird aus BotControllGUI aufgreufen, sobald ein neues Messwertdopel
von der seriellen Konsole gelesen wurde"""
datastring = datastring.decode('utf-8').rstrip("\r\n")
data = [float(val) for val in datastring.split()]
assert(len(data) == 2)
self.addToBuf(self.ax, data[0])
self.addToBuf(self.ay, data[1])
def update(self, frameNum, a0, a1):
a0.set_data(range(self.maxLen), self.ax)
a1.set_data(range(self.maxLen), self.ay)
return a0
        # Signature and return value are dictated by Matplotlib's FuncAnimation
class MplCanvas(FigureCanvas):
"""Class to represent the FigureCanvas widget"""
def __init__(self):
# setup Matplotlib Figure and Axis
self.fig = Figure(facecolor='snow', edgecolor='white')
self.fig.patch.set_alpha(0.0)
self.fig.set_tight_layout(True)
self.ax = self.fig.add_subplot(111)
self.ax.grid(b=True, which='major', color='b', linewidth=1.5)
self.ax.grid(b=True, which='minor', color='r', linewidth=0.5)
self.ax.set_axisbelow(True)
self.ax.set_ylim([-60, 60])
        self.ax.set_xlim([0, 1000])  # todo: maxLen is set in several places
self.ax.axhline(y=0, linewidth=1.5, color='k')
self.ax.set_ylabel('Neigung °', fontsize=14,
fontweight='bold', color='b')
self.line_Plot1, = self.ax.plot([], [], label='Neigung °',
linewidth=1.0, linestyle="-")
self.line_Plot2, = self.ax.plot([], [], label='PID Antwort',
linewidth=1.0, linestyle="-")
#self.anim = animation.FuncAnimation(self.fig, PlotValueBuffer.update,
#fargs=(self.line_Plot1, self.line_Plot2),
#interval=100)
        # Values < 30 will raise a Tkinter error
# initialization of the canvas
FigureCanvas.__init__(self, self.fig)
# we define the widget as expandable
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
# notify the system of updated policy
FigureCanvas.updateGeometry(self)
# this is the Navigation widget
# it takes the Canvas widget and a parent
#self.toolbar = NavigationToolbar(self.FigureCanvas, self)
class MplWidget(QtGui.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, parent=None):
# initialization of Qt MainWindow widget
QtGui.QWidget.__init__(self, parent)
        # create a buffer  # todo: maxLen is set in several places
self.valueBuffer = PlotValueBuffer(maxLen=1000)
# set the canvas to the Matplotlib widget
self.canvas = MplCanvas()
# create a vertical box layout
self.vbl = QtGui.QVBoxLayout()
# add mpl widget to the vertical box
self.vbl.addWidget(self.canvas)
# set the layout to the vertical box
self.setLayout(self.vbl)
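# A minimal usage sketch of PlotValueBuffer (assumption: readings arrive as
# byte strings such as b"12.5 -3.0\r\n", which is the format add() parses):
if __name__ == "__main__":
    demo_buffer = PlotValueBuffer(maxLen=1000)
    demo_buffer.add(b"12.5 -3.0\r\n")
    # the newest tilt / PID values sit at the left end of the deques
    print(demo_buffer.ax[0], demo_buffer.ay[0])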
|
gpl-2.0
|
ahadmushir/whatsCooking
|
finalModel.py
|
1
|
4080
|
import pandas as pd
import numpy as np
import nltk
import re
from nltk.stem import WordNetLemmatizer
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn import grid_search
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
"""
Soft Voting/Majority Rule classifier
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import TransformerMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.externals import six
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
import numpy as np
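# ``EnsembleClassifier`` is used further below but is not defined in this file
# and is not part of scikit-learn. The class here is a minimal weighted
# majority-vote sketch (an assumption built only from the imports above) so
# the script is self-contained; it is not necessarily the implementation the
# original author had in mind.
class EnsembleClassifier(BaseEstimator, ClassifierMixin):
    """Weighted majority-rule voting over a list of classifiers."""
    def __init__(self, clfs=None, weights=None):
        self.clfs = clfs
        self.weights = weights
    def fit(self, X, y):
        # encode labels so votes can be aggregated with np.bincount
        self.le_ = LabelEncoder().fit(y)
        self.classes_ = self.le_.classes_
        self.clfs_ = [clone(clf).fit(X, self.le_.transform(y))
                      for clf in self.clfs]
        return self
    def predict(self, X):
        # row i holds the encoded votes of all classifiers for sample i
        predictions = np.asarray([clf.predict(X) for clf in self.clfs_]).T
        weights = (np.asarray(self.weights, dtype=float)
                   if self.weights is not None
                   else np.ones(predictions.shape[1]))
        maj = np.array([np.argmax(np.bincount(row, weights=weights,
                                              minlength=len(self.classes_)))
                        for row in predictions])
        return self.le_.inverse_transform(maj)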
#Reading training file
traindf = pd.read_json("../input/train.json")
# traindf['ingredients_clean_string'] = [' , '.join(z).strip() for z in traindf['ingredients']]
####
#using lematizer from NLTK library
traindf['ingredients_string'] = [' '.join([WordNetLemmatizer().lemmatize(re.sub('[^A-Za-z]', ' ', line)) for line in lists]).strip() for lists in traindf['ingredients']]
#Reading Test
testdf = pd.read_json("../input/test.json")
# testdf['ingredients_clean_string'] = [' , '.join(z).strip() for z in testdf['ingredients']]
####
#using lematizer from NLTK library
testdf['ingredients_string'] = [' '.join([WordNetLemmatizer().lemmatize(re.sub('[^A-Za-z]', ' ', line)) for line in lists]).strip() for lists in testdf['ingredients']]
####
#Making TF-IDF vector
corpustr = traindf['ingredients_string']
vectorizertr = TfidfVectorizer(stop_words='english', ngram_range = ( 1, 1),analyzer="word",
max_df = .6 , binary=False , token_pattern=r'\w+' , sublinear_tf=False, norm = 'l2')
#vectorizertr = HashingVectorizer(stop_words='english',
# ngram_range = ( 1 , 2 ),analyzer="word", token_pattern=r'\w+' , n_features = 7000)
tfidftr = vectorizertr.fit_transform(corpustr).todense()
corpusts = testdf['ingredients_string']
vectorizerts = TfidfVectorizer(stop_words='english', ngram_range = ( 1, 1),analyzer="word",
max_df = .6 , binary=False , token_pattern=r'\w+' , sublinear_tf=False, norm = 'l2')
#vectorizerts = HashingVectorizer(stop_words='english',
# ngram_range = ( 1 , 2 ),analyzer="word", token_pattern=r'\w+' , n_features = 7000)
tfidfts = vectorizertr.transform(corpusts)
predictors_tr = tfidftr
targets_tr = traindf['cuisine']
predictors_ts = tfidfts
################################
# Initialize classifiers
################################
np.random.seed(1)
print("Ensemble: LR - linear SVC")
clf1 = LogisticRegression(random_state=1, C=7)
clf2 = LinearSVC(random_state=1, C=0.4, penalty="l2", dual=False)
nb = BernoulliNB()
rfc = RandomForestClassifier(random_state=1, criterion = 'gini', n_estimators=500)
sgd = SGDClassifier(random_state=1, alpha=0.00001, penalty='l2', n_iter=50)
eclf = EnsembleClassifier(clfs=[clf1, clf2,nb, rfc, sgd], weights=[2, 2, 1, 1,2])
np.random.seed(1)
for clf, label in zip([eclf],
['Ensemble']):
scores = cross_validation.cross_val_score(clf, predictors_tr,targets_tr, cv=2, scoring='accuracy')
print("Accuracy: %0.4f (+/- %0.5f) [%s]" % (scores.mean(), scores.std(), label))
eclf2 = eclf.fit(predictors_tr,targets_tr)
predictions = eclf2.predict(predictors_ts)
testdf['cuisine'] = predictions
#testdf = testdf.sort('id' , ascending=True)
testdf = testdf.sort_values(by='id' , ascending=True)
##show the detail
# testdf[['id' , 'ingredients_clean_string' , 'cuisine' ]].to_csv("info_vote2.csv")
#for submit, no index
testdf[['id' , 'cuisine' ]].to_csv("just_python_cooking-vote1.csv", index=False)
|
apache-2.0
|
chenyyx/scikit-learn-doc-zh
|
examples/en/applications/plot_species_distribution_modeling.py
|
35
|
7372
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
|
gpl-3.0
|
UltronAI/Deep-Learning
|
Pattern-Recognition/hw2-Feature-Selection/skfeature/function/wrapper/svm_forward.py
|
1
|
1726
|
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
def svm_forward(X, y, n_selected_features):
"""
This function implements the forward feature selection algorithm based on SVM
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
y: {numpy array}, shape (n_samples,)
input class labels
n_selected_features: {int}
number of selected features
Output
------
F: {numpy array}, shape (n_features, )
index of selected features
"""
n_samples, n_features = X.shape
# using 10 fold cross validation
    cv = KFold(n_splits=10, shuffle=True)
# choose SVM as the classifier
clf = SVC()
# selected feature set, initialized to be empty
F = []
count = 0
while count < n_selected_features:
max_acc = 0
for i in range(n_features):
if i not in F:
F.append(i)
X_tmp = X[:, F]
acc = 0
                for train, test in cv.split(X_tmp):
clf.fit(X_tmp[train], y[train])
y_predict = clf.predict(X_tmp[test])
acc_tmp = accuracy_score(y[test], y_predict)
acc += acc_tmp
acc = float(acc)/10
F.pop()
# record the feature which results in the largest accuracy
if acc > max_acc:
max_acc = acc
idx = i
# add the feature which results in the largest accuracy
F.append(idx)
count += 1
return np.array(F)
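# A minimal, self-contained usage sketch on synthetic data (an assumption,
# not part of the original module):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.rand(60, 8)
    # make the first two features informative for a binary target
    y_demo = (X_demo[:, 0] + X_demo[:, 1] > 1.0).astype(int)
    print(svm_forward(X_demo, y_demo, n_selected_features=3))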
|
mit
|
BigDataforYou/movie_recommendation_workshop_1
|
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/core/reshape.py
|
1
|
40542
|
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import numpy as np
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse import SparseDataFrame, SparseSeries
from pandas.sparse.array import SparseArray
from pandas._sparse import IntIndex
from pandas.core.categorical import Categorical
from pandas.core.common import notnull, _ensure_platform_int, _maybe_promote
from pandas.core.groupby import get_group_index, _compress_group_index
import pandas.core.common as com
import pandas.types.concat as _concat
import pandas.core.algorithms as algos
import pandas.algos as _algos
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: float64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 2
b 3 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None):
self.is_categorical = None
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = _compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = algos.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
values = [Categorical.from_array(
values[:, i], categories=self.is_categorical.categories,
ordered=True) for i in range(values.shape[-1])]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
# if our mask is all True, then we can use our existing dtype
if self.mask.all():
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
for i in range(values.shape[1]):
chunk = new_values[:, i * width:(i + 1) * width]
mask_chunk = new_mask[:, i * width:(i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
names=self.new_index_names, verify_integrity=False)
def _unstack_multiple(data, clocs):
from pandas.core.groupby import decons_obs_group_ids
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = _compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels,
xnull=False)
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = Series(data.values, index=dummy_index)
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = DataFrame(data.values, index=dummy_index, columns=data.columns)
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
return indexed.unstack(columns)
else:
if index is None:
index = self.index
else:
index = self[index]
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([index, self[columns]]))
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
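    Examples
    --------
    A minimal sketch (skipped in doctests):
    >>> import numpy as np
    >>> pivot_simple(np.array(['one', 'one', 'two']),
    ...              np.array(['a', 'b', 'a']),
    ...              np.array([1., 2., 3.]))  # doctest: +SKIP
           a    b
    one  1.0  2.0
    two  3.0  NaN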
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sortlevel(0)
return series.unstack()
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value)
return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
new_index = unstacker.get_new_index()
new_axes = [new_columns, new_index]
new_blocks = []
mask_blocks = []
for blk in obj._data.blocks:
blk_items = obj._data.items[blk.mgr_locs.indexer]
bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
value_columns=blk_items,
fill_value=fill_value)
new_items = bunstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = bunstacker.get_new_values()
mblk = make_block(mask.T, placement=new_placement)
mask_blocks.append(mblk)
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
result = DataFrame(BlockManager(new_blocks, new_axes))
mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
return result.ix[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value)
return unstacker.get_result()
def get_compressed_ids(labels, sizes):
from pandas.core.groupby import get_group_index
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return _compress_group_index(ids, sort=True)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
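    Examples
    --------
    A minimal sketch (skipped in doctests):
    >>> df = DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> stack(df)  # doctest: +SKIP
    0  a    1
       b    3
    1  a    2
       b    4
    dtype: int64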
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
cat = Categorical(index, ordered=True)
return cat.categories, cat.codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notnull(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return Series(new_values, index=new_index)
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sortlevel(level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(lab)
for lev, lab in zip(this.columns.levels[:-1],
this.columns.labels[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
slice_len = loc.stop - loc.start
# can make more efficient?
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.ix[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.ix[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(frame.columns.levels[level_num])
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def melt(frame, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
"""
"Unpivots" a DataFrame from wide format to long format, optionally leaving
identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> pd.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
# TODO: what about the existing index?
if id_vars is not None:
if not isinstance(id_vars, (tuple, list, np.ndarray)):
id_vars = [id_vars]
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not isinstance(value_vars, (tuple, list, np.ndarray)):
value_vars = [value_vars]
frame = frame.ix[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i
for i in range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns.get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
def lreshape(data, groups, dropna=True, label=None):
"""
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2008], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team hr year
0 Red Sox 514 2007
1 Yankees 573 2007
2 Red Sox 545 2008
3 Yankees 526 2008
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
to_concat = [data[col].values for col in names]
mdata[target] = _concat._concat_compat(to_concat)
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notnull(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j):
"""
Wide panel to long format. Less flexible but more user-friendly than melt.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : list
A list of stub names. The wide format variables are assumed to
start with the stub names.
i : str
The name of the id variable.
j : str
The name of the subobservation variable.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable as well as
variables for i and j.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> wide_to_long(df, ["A", "B"], i="id", j="year")
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
Notes
-----
All extra variables are treated as extra id variables. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
    in a typical case.
"""
def get_var_names(df, regex):
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j):
varnames = get_var_names(df, "^" + stub)
newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub,
var_name=j)
newdf_j = newdf[j].str.replace(stub, "")
try:
newdf_j = newdf_j.astype(int)
except ValueError:
pass
newdf[j] = newdf_j
return newdf
id_vars = get_var_names(df, "^(?!%s)" % "|".join(stubnames))
if i not in id_vars:
id_vars += [i]
newdf = melt_stub(df, stubnames[0], id_vars, j)
for stub in stubnames[1:]:
new = melt_stub(df, stub, id_vars, j)
newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
return newdf.set_index([i, j])
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.16.1
drop_first : bool, default False
Whether to get k-1 dummies out of n categorical levels by removing the
first level.
.. versionadded:: 0.18.0
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
    >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
See Also
--------
Series.str.get_dummies
"""
from pandas.tools.merge import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(
include=['object', 'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did not match the length of "
"the columns being encoded ({2}).")
if com.is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
if set(columns_to_encode) == set(data.columns):
with_dummies = []
else:
with_dummies = [data.drop(columns_to_encode, axis=1)]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse, drop_first=drop_first)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False):
# Series avoids inconsistent NaN handling
cat = Categorical.from_array(Series(data), ordered=True)
levels = cat.categories
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_Frame(data, sparse)
codes = cat.codes.copy()
if dummy_na:
codes[codes == -1] = len(cat.categories)
levels = np.append(cat.categories, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs)),
sparse_index=IntIndex(N, ixs), fill_value=0)
sparse_series[col] = SparseSeries(data=sarr, index=index)
return SparseDataFrame(sparse_series, index=index, columns=dummy_cols)
else:
dummy_mat = np.eye(number_of_cols).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
cat = Categorical.from_array(mapped_items.take(labels), ordered=True)
labels = cat.codes
items = cat.categories
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
|
mit
|
mohanprasath/Course-Work
|
data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part06-e07_binding_sites/src/binding_sites.py
|
1
|
2468
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import accuracy_score
from sklearn.metrics import pairwise_distances
import scipy.stats
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
import scipy.spatial as sp
import scipy.cluster.hierarchy as hc
def toint(x):
    # map a single nucleotide character ('A', 'C', 'G' or 'T') to an integer code
    reference = {'A':0, 'C':1, 'G':2, 'T':3}
    return reference[x]
def find_permutation(n_clusters, real_labels, labels):
permutation = []
for i in range(n_clusters):
idx = labels == i
# Choose the most common label among data points in the cluster
new_label = scipy.stats.mode(real_labels[idx])[0][0]
permutation.append(new_label)
return permutation
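# Illustrative check of find_permutation with made-up labels: for each cluster
# id it picks the most common true label within that cluster.
# >>> real = np.array([1, 1, 0, 0])
# >>> predicted = np.array([0, 0, 1, 1])
# >>> find_permutation(2, real, predicted)
# [1, 0]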
def get_features_and_labels(filename):
df = pd.read_csv(filename, sep='\t')
# print(df.head())
labels = df.loc[:, 'y':]
features = df.loc[:, 'X'].apply(lambda x: [toint(a) for a in x])
return (np.array(features.tolist()), labels)
def plot(distances, method='average', affinity='euclidean'):
mylinkage = hc.linkage(sp.distance.squareform(distances), method=method)
g=sns.clustermap(distances, row_linkage=mylinkage, col_linkage=mylinkage )
g.fig.suptitle(f"Hierarchical clustering using {method} linkage and {affinity} affinity")
plt.show()
def cluster_euclidean(filename):
X, y = get_features_and_labels(filename)
model = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='average')
y_predicted = model.fit_predict(X, y)
permutation = find_permutation(2, y, y_predicted)
new_labels = [permutation[label] for label in model.labels_]
return accuracy_score(y, new_labels)
def cluster_hamming(filename):
X, y = get_features_and_labels(filename)
model = AgglomerativeClustering(n_clusters=2, affinity='precomputed', linkage='average')
distance = pairwise_distances(X, metric='hamming')
y_predicted = model.fit_predict(distance, y)
permutation = find_permutation(2, y, y_predicted)
new_labels = [permutation[label] for label in model.labels_]
return accuracy_score(y, new_labels)
def main():
print("Accuracy score with Euclidean affinity is", cluster_euclidean("src/data.seq"))
print("Accuracy score with Hamming affinity is", cluster_hamming("src/data.seq"))
if __name__ == "__main__":
main()
|
gpl-3.0
|
marcoantoniooliveira/labweb
|
oscar/lib/python2.7/site-packages/IPython/qt/console/qtconsoleapp.py
|
2
|
13295
|
""" A minimal application using the Qt console-style IPython frontend.
This is not a complete console app, as subprocess will not be able to receive
input, there is no real readline support, among other limitations.
Authors:
* Evan Patterson
* Min RK
* Erik Tollerud
* Fernando Perez
* Bussonnier Matthias
* Thomas Kluyver
* Paul Ivanov
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib imports
import os
import signal
import sys
# If run on Windows, install an exception hook which pops up a
# message box. Pythonw.exe hides the console, so without this
# the application silently fails to load.
#
# We always install this handler, because the expectation is for
# qtconsole to bring up a GUI even if called from the console.
# The old handler is called, so the exception is printed as well.
# If desired, check for pythonw with an additional condition
# (sys.executable.lower().find('pythonw.exe') >= 0).
if os.name == 'nt':
old_excepthook = sys.excepthook
def gui_excepthook(exctype, value, tb):
try:
import ctypes, traceback
MB_ICONERROR = 0x00000010L
title = u'Error starting IPython QtConsole'
msg = u''.join(traceback.format_exception(exctype, value, tb))
ctypes.windll.user32.MessageBoxW(0, msg, title, MB_ICONERROR)
finally:
# Also call the old exception hook to let it do
# its thing too.
old_excepthook(exctype, value, tb)
sys.excepthook = gui_excepthook
# System library imports
from IPython.external.qt import QtCore, QtGui
# Local imports
from IPython.config.application import catch_config_error
from IPython.core.application import BaseIPythonApplication
from IPython.qt.console.ipython_widget import IPythonWidget
from IPython.qt.console.rich_ipython_widget import RichIPythonWidget
from IPython.qt.console import styles
from IPython.qt.console.mainwindow import MainWindow
from IPython.qt.client import QtKernelClient
from IPython.qt.manager import QtKernelManager
from IPython.utils.traitlets import (
Dict, Unicode, CBool, Any
)
from IPython.consoleapp import (
IPythonConsoleApp, app_aliases, app_flags, flags, aliases
)
#-----------------------------------------------------------------------------
# Network Constants
#-----------------------------------------------------------------------------
from IPython.utils.localinterfaces import LOCALHOST, LOCAL_IPS
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
_examples = """
ipython qtconsole # start the qtconsole
ipython qtconsole --matplotlib=inline # start with matplotlib inline plotting mode
"""
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
# start with copy of flags
flags = dict(flags)
qt_flags = {
'plain' : ({'IPythonQtConsoleApp' : {'plain' : True}},
"Disable rich text support."),
}
# and app_flags from the Console Mixin
qt_flags.update(app_flags)
# add frontend flags to the full set
flags.update(qt_flags)
# start with copy of front&backend aliases list
aliases = dict(aliases)
qt_aliases = dict(
style = 'IPythonWidget.syntax_style',
stylesheet = 'IPythonQtConsoleApp.stylesheet',
colors = 'ZMQInteractiveShell.colors',
editor = 'IPythonWidget.editor',
paging = 'ConsoleWidget.paging',
)
# and app_aliases from the Console Mixin
qt_aliases.update(app_aliases)
qt_aliases.update({'gui-completion':'ConsoleWidget.gui_completion'})
# add frontend aliases to the full set
aliases.update(qt_aliases)
# get flags&aliases into sets, and remove a couple that
# shouldn't be scrubbed from backend flags:
qt_aliases = set(qt_aliases.keys())
qt_aliases.remove('colors')
qt_flags = set(qt_flags.keys())
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# IPythonQtConsole
#-----------------------------------------------------------------------------
class IPythonQtConsoleApp(BaseIPythonApplication, IPythonConsoleApp):
name = 'ipython-qtconsole'
description = """
The IPython QtConsole.
This launches a Console-style application using Qt. It is not a full
console, in that launched terminal subprocesses will not be able to accept
input.
The QtConsole supports various extra features beyond the Terminal IPython
shell, such as inline plotting with matplotlib, via:
ipython qtconsole --matplotlib=inline
as well as saving your session as HTML, and printing the output.
"""
examples = _examples
classes = [IPythonWidget] + IPythonConsoleApp.classes
flags = Dict(flags)
aliases = Dict(aliases)
frontend_flags = Any(qt_flags)
frontend_aliases = Any(qt_aliases)
kernel_client_class = QtKernelClient
kernel_manager_class = QtKernelManager
stylesheet = Unicode('', config=True,
help="path to a custom CSS stylesheet")
hide_menubar = CBool(False, config=True,
help="Start the console window with the menu bar hidden.")
maximize = CBool(False, config=True,
help="Start the console window maximized.")
plain = CBool(False, config=True,
help="Use a plaintext widget instead of rich text (plain can't print/save).")
def _plain_changed(self, name, old, new):
kind = 'plain' if new else 'rich'
self.config.ConsoleWidget.kind = kind
if new:
self.widget_factory = IPythonWidget
else:
self.widget_factory = RichIPythonWidget
# the factory for creating a widget
widget_factory = Any(RichIPythonWidget)
def parse_command_line(self, argv=None):
super(IPythonQtConsoleApp, self).parse_command_line(argv)
self.build_kernel_argv(argv)
def new_frontend_master(self):
""" Create and return new frontend attached to new kernel, launched on localhost.
"""
kernel_manager = self.kernel_manager_class(
connection_file=self._new_connection_file(),
parent=self,
autorestart=True,
)
# start the kernel
kwargs = dict()
kwargs['extra_arguments'] = self.kernel_argv
kernel_manager.start_kernel(**kwargs)
kernel_manager.client_factory = self.kernel_client_class
kernel_client = kernel_manager.client()
kernel_client.start_channels(shell=True, iopub=True)
widget = self.widget_factory(config=self.config,
local_kernel=True)
self.init_colors(widget)
widget.kernel_manager = kernel_manager
widget.kernel_client = kernel_client
widget._existing = False
widget._may_close = True
widget._confirm_exit = self.confirm_exit
return widget
def new_frontend_slave(self, current_widget):
"""Create and return a new frontend attached to an existing kernel.
Parameters
----------
current_widget : IPythonWidget
The IPythonWidget whose kernel this frontend is to share
"""
kernel_client = self.kernel_client_class(
connection_file=current_widget.kernel_client.connection_file,
config = self.config,
)
kernel_client.load_connection_file()
kernel_client.start_channels()
widget = self.widget_factory(config=self.config,
local_kernel=False)
self.init_colors(widget)
widget._existing = True
widget._may_close = False
widget._confirm_exit = False
widget.kernel_client = kernel_client
widget.kernel_manager = current_widget.kernel_manager
return widget
def init_qt_app(self):
# separate from qt_elements, because it must run first
self.app = QtGui.QApplication([])
def init_qt_elements(self):
# Create the widget.
base_path = os.path.abspath(os.path.dirname(__file__))
icon_path = os.path.join(base_path, 'resources', 'icon', 'IPythonConsole.svg')
self.app.icon = QtGui.QIcon(icon_path)
QtGui.QApplication.setWindowIcon(self.app.icon)
ip = self.ip
local_kernel = (not self.existing) or ip in LOCAL_IPS
self.widget = self.widget_factory(config=self.config,
local_kernel=local_kernel)
self.init_colors(self.widget)
self.widget._existing = self.existing
self.widget._may_close = not self.existing
self.widget._confirm_exit = self.confirm_exit
self.widget.kernel_manager = self.kernel_manager
self.widget.kernel_client = self.kernel_client
self.window = MainWindow(self.app,
confirm_exit=self.confirm_exit,
new_frontend_factory=self.new_frontend_master,
slave_frontend_factory=self.new_frontend_slave,
)
self.window.log = self.log
self.window.add_tab_with_frontend(self.widget)
self.window.init_menu_bar()
# Ignore on OSX, where there is always a menu bar
if sys.platform != 'darwin' and self.hide_menubar:
self.window.menuBar().setVisible(False)
self.window.setWindowTitle('IPython')
def init_colors(self, widget):
"""Configure the coloring of the widget"""
# Note: This will be dramatically simplified when colors
# are removed from the backend.
# parse the colors arg down to current known labels
try:
colors = self.config.ZMQInteractiveShell.colors
except AttributeError:
colors = None
try:
style = self.config.IPythonWidget.syntax_style
except AttributeError:
style = None
try:
sheet = self.config.IPythonWidget.style_sheet
except AttributeError:
sheet = None
# find the value for colors:
if colors:
colors=colors.lower()
if colors in ('lightbg', 'light'):
colors='lightbg'
elif colors in ('dark', 'linux'):
colors='linux'
else:
colors='nocolor'
elif style:
if style=='bw':
colors='nocolor'
elif styles.dark_style(style):
colors='linux'
else:
colors='lightbg'
else:
colors=None
# Configure the style
if style:
widget.style_sheet = styles.sheet_from_template(style, colors)
widget.syntax_style = style
widget._syntax_style_changed()
widget._style_sheet_changed()
elif colors:
# use a default dark/light/bw style
widget.set_default_style(colors=colors)
if self.stylesheet:
# we got an explicit stylesheet
if os.path.isfile(self.stylesheet):
with open(self.stylesheet) as f:
sheet = f.read()
else:
raise IOError("Stylesheet %r not found." % self.stylesheet)
if sheet:
widget.style_sheet = sheet
widget._style_sheet_changed()
def init_signal(self):
"""allow clean shutdown on sigint"""
signal.signal(signal.SIGINT, lambda sig, frame: self.exit(-2))
# need a timer, so that QApplication doesn't block until a real
# Qt event fires (can require mouse movement)
# timer trick from http://stackoverflow.com/q/4938723/938949
timer = QtCore.QTimer()
# Let the interpreter run each 200 ms:
timer.timeout.connect(lambda: None)
timer.start(200)
# hold onto ref, so the timer doesn't get cleaned up
self._sigint_timer = timer
@catch_config_error
def initialize(self, argv=None):
self.init_qt_app()
super(IPythonQtConsoleApp, self).initialize(argv)
IPythonConsoleApp.initialize(self,argv)
self.init_qt_elements()
self.init_signal()
def start(self):
# draw the window
if self.maximize:
self.window.showMaximized()
else:
self.window.show()
self.window.raise_()
# Start the application main loop.
self.app.exec_()
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
def main():
app = IPythonQtConsoleApp()
app.initialize()
app.start()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
drwalshaw/sc-python
|
analyse.py
|
1
|
1492
|
import sys
import numpy
import matplotlib.pyplot
def detect_problems (filename):
    """some of our temperature files have problems, check for these
    this function reads a file and reports on odd looking maxima and minima that add to zero
    the function does not return any data
    """
data=numpy.loadtxt(fname=filename, delimiter=',')
if numpy.max (data, axis=0)[0] ==0 and numpy.max (data, axis=0)[20] ==20:
print ('suspicious looking maxima')
elif numpy.sum(numpy.min(data, axis=0)) ==0:
print ('minimum adds to zero')
else:
print ('data looks ok')
def analyse (filename, outfile=None):
    """ this function analyses a dataset and outputs plots for the max, min and average
    """
    data=numpy.loadtxt(fname=filename,delimiter=',')
fig=matplotlib.pyplot.figure (figsize=(10.0,3.0))
subplot1=fig.add_subplot (1,3,1)
subplot2=fig.add_subplot (1,3,2)
subplot3=fig.add_subplot (1,3,3)
subplot1.set_ylabel('average')
subplot1.plot(numpy.mean(data, axis=0))
subplot2.set_ylabel('minimum')
subplot2.plot(numpy.min(data, axis=0))
subplot3.set_ylabel('maximum')
subplot3.plot(numpy.max(data, axis=0))
fig.tight_layout()
if outfile is None:
        matplotlib.pyplot.show()
else:
matplotlib.pyplot.savefig(outfile)
print("running",sys.argv[0])
print (sys.argv[1])
analyse (sys.argv[1], outfile=sys.argv[2])
detect_problems (sys.argv[1])
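# Example invocation (file names are illustrative only):
#   python analyse.py inflammation-01.csv plot.png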
|
mit
|
AndiH/QuantifiedSelf
|
Typingstats/keyHeatmap_visulization.py
|
1
|
1169
|
import numpy as np #numerical stuff
import pylab as pl #for poly line fit
import sys, datetime #for converting to human readable date
# import matplotlib.pyplot as plt
import prettyplotlib as ppl # makes nicer colors and generally better to look at graphs
from prettyplotlib import plt # This is "import matplotlib.pyplot as plt" from the prettyplotlib library
from prettyplotlib import mpl # This is "import matplotlib as mpl" from the prettyplotlib library
# change font to Open Sans (has some kerning issues, though)
# mpl.rcParams.update({'font.family':'Open Sans'})
# get name of file to process
inputFileName = sys.argv[1] # keyPresses_Heatmap.csv
# load csv file with EPOCHTIME;NOFPROCESSES
data = np.loadtxt(inputFileName, delimiter=";")
# data[:,1] = [x - notActuallyChromeTabs for x in data[:,1]]
#
print data[:,0], data[:,1]
fig = plt.figure(figsize=(14,8))
ax = fig.add_subplot(111)
# ppl.bar(ax, data[:,0], data[:,1])
ax.bar(data[:,0], data[:,1])
# ax.axis('tight')
plt.show()
# xy = [(x_int[i],y_int[i]) for i in range(0,len(x_int))]
# xy_sorted = np.array(sorted(xy, key=lambda yz: yz[1], reverse=True))
# ax.bar(xy_sorted[:,0], xy_sorted[:,1])
|
gpl-2.0
|
janchorowski/blocks-extras
|
tests/scripts/test_plot.py
|
4
|
2447
|
import tempfile
import blocks_extras.scripts.plot as plot
from collections import OrderedDict
from tests import silence_printing, skip_if_not_available
from numpy import nan, isfinite
from blocks.log import TrainingLog
from blocks.main_loop import MainLoop
from blocks.serialization import dump
try:
from pandas import DataFrame
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
def some_experiments():
"""Create some 2 dummy experiments."""
experiments = OrderedDict()
experiments['exp0'] = DataFrame()
experiments['exp0']['col0'] = (0, 1, 2)
experiments['exp0']['col1'] = (3, 4, 5)
experiments['exp1'] = DataFrame()
experiments['exp1']['col0'] = (6, 7, 8, 9)
experiments['exp1']['col1'] = (9, 9, 9, 9)
return experiments
def test_load_log():
log = TrainingLog()
log[0]['channel0'] = 0
# test simple TrainingLog pickles
with tempfile.NamedTemporaryFile() as f:
dump(log, f)
f.flush()
log2 = plot.load_log(f.name)
assert log2[0]['channel0'] == 0
# test MainLoop pickles
main_loop = MainLoop(model=None, data_stream=None,
algorithm=None, log=log)
with tempfile.NamedTemporaryFile() as f:
dump(main_loop, f)
f.flush()
log2 = plot.load_log(f.name)
assert log2[0]['channel0'] == 0
@silence_printing
def test_print_column_summary():
skip_if_not_available(modules=['pandas'])
experiments = some_experiments()
plot.print_column_summary(experiments)
def test_match_column_specs():
skip_if_not_available(modules=['pandas'])
experiments = some_experiments()
specs = ['0:col0', '*1']
df = plot.match_column_specs(experiments, specs)
assert isinstance(df, DataFrame)
assert list(df.columns) == ['0:col0', '0:col1', '1:col1']
assert list(df.index) == [0, 1, 2, 3]
def test_interpolate():
    """ Ensure that DataFrame.interpolate(method='nearest') has the
    desired properties.
    It is used by blocks-plot and should:
    * interpolate missing/NaN datapoints between valid ones
    * not replace any NaN before/after the first/last finite datapoint
    """
    skip_if_not_available(modules=['pandas'])
y = [nan, nan, 2., 3., nan, 5, nan, nan]
df = DataFrame(y)
df_ = df.interpolate(method='nearest')[0]
assert all(isfinite(df_[2:6]))
assert all(~isfinite(df_[0:2]))
assert all(~isfinite(df_[6:8]))
|
mit
|
catherinezucker/radfil
|
radfil/plot.py
|
1
|
10924
|
import numpy as np
import numbers
import warnings
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from . import styles
def plotCuts(radobj, ax):
if hasattr(radobj, 'dictionary_cuts'):
dictionary_cuts = radobj.dictionary_cuts.copy()
else:
raise ValueError('Please run build_profile before plotting.')
# plot the peaks
if dictionary_cuts['plot_peaks'] is not None:
toPlot = np.asarray(dictionary_cuts['plot_peaks'])
ax.plot(toPlot[:, 0].astype(int), toPlot[:, 1].astype(int), 'b.',
markersize = 12.,
alpha=0.75,
zorder = 999, markeredgecolor='white',markeredgewidth=0.5)
# plot the cuts
if dictionary_cuts['plot_cuts'] is not None:
toPlot = dictionary_cuts['plot_cuts']
[ax.plot(np.asarray(cut)[:, 0], np.asarray(cut)[:, 1], 'r-', linewidth = 1.)\
for cut in toPlot]
return ax
class RadFilPlotter(object):
'''
A class to plot the results in radfil objects.
'''
def __init__(self, radobj):
self.radobj = radobj
def plotCuts(self, ax, savefig = False):
## prepare
vmin, vmax = np.nanmin(self.radobj.image[self.radobj.mask]), np.nanpercentile(self.radobj.image[self.radobj.mask], 98.)
xmin, xmax = np.where(self.radobj.mask)[1].min(), np.where(self.radobj.mask)[1].max()
ymin, ymax = np.where(self.radobj.mask)[0].min(), np.where(self.radobj.mask)[0].max()
if self.radobj.cutting:
## plotting
ax.imshow(self.radobj.image,
origin='lower',
cmap='gray',
interpolation='none',
norm = colors.Normalize(vmin = vmin, vmax = vmax))
ax.contourf(self.radobj.mask,
levels = [0., .5],
colors = 'w')
ax.plot(self.radobj.xspline, self.radobj.yspline, 'r', label='fit', lw=3, alpha=1.0)
ax.set_xlim(max(0., xmin-.1*(xmax-xmin)), min(self.radobj.mask.shape[1]-.5, xmax+.1*(xmax-xmin)))
ax.set_ylim(max(0., ymin-.1*(ymax-ymin)), min(self.radobj.mask.shape[0]-.5, ymax+.1*(ymax-ymin)))
else:
## plotting
ax.imshow(self.radobj.image,
origin='lower',
cmap='gray',
interpolation='none',
norm = colors.Normalize(vmin = vmin, vmax = vmax))
ax.contourf(self.radobj.mask,
levels = [0., .5],
colors = 'w')
ax.plot(line.xy[0], line.xy[1], 'r', label='fit', lw=2, alpha=0.5)
ax.set_xlim(max(0., xmin-.1*(xmax-xmin)), min(self.radobj.mask.shape[1]-.5, xmax+.1*(xmax-xmin)))
ax.set_ylim(max(0., ymin-.1*(ymax-ymin)), min(self.radobj.mask.shape[0]-.5, ymax+.1*(ymax-ymin)))
plotCuts(self.radobj, ax)
def plotFits(self, ax, plotFeature):
if isinstance(plotFeature, str):
if plotFeature.lower() == 'model':
if self.radobj.bgdist is not None:
xplot = self.radobj.xall
yplot = self.radobj.yall - self.radobj.bgfit(xplot)
xlim=np.max(self.radobj.bgdist*1.5)
else:
xplot=self.radobj.xall
yplot=self.radobj.yall
xlim=np.max(np.absolute(self.radobj.fitdist))*1.5
## Plot model
#Adjust axis limit based on percentiles of data
#axis.set_xlim(np.min(self.radobj.xall), np.max(self.radobj.xall))
#xlim=np.max(np.absolute([np.nanpercentile(self.radobj.xall[np.isfinite(self.radobj.yall)],1),np.nanpercentile(self.radobj.xall[np.isfinite(self.radobj.yall)],99)]))
if not self.radobj.fold:
ax.set_xlim(-xlim,+xlim)
else:
ax.set_xlim(0., +xlim)
ax.set_ylim(np.nanpercentile(yplot,0)-np.abs(0.5*np.nanpercentile(yplot,0)),np.nanpercentile(yplot,99.9)+np.abs(0.25*np.nanpercentile(yplot,99.9)))
ax.plot(xplot, yplot, 'k.', markersize = 1., alpha=styles.get_scatter_alpha(len(self.radobj.xall)))
if self.radobj.binning:
if self.radobj.bgdist is not None:
plotbinx, plotbiny = np.ravel(list(zip(self.radobj.bins[:-1], self.radobj.bins[1:]))), np.ravel(list(zip(self.radobj.mastery-self.radobj.bgfit(self.radobj.masterx), self.radobj.mastery-self.radobj.bgfit(self.radobj.masterx))))
else:
plotbinx, plotbiny = np.ravel(list(zip(self.radobj.bins[:-1], self.radobj.bins[1:]))), np.ravel(list(zip(self.radobj.mastery, self.radobj.mastery)))
ax.plot(plotbinx, plotbiny,
'r-')
# Plot the range
if self.radobj.fitdist is not None:
## symmetric fitting range
if isinstance(self.radobj.fitdist, numbers.Number):
ax.fill_between([-self.radobj.fitdist, self.radobj.fitdist], *ax.get_ylim(),
facecolor = (0., 0., 1., .05),
edgecolor = 'b',
linestyle = '--',
linewidth = 1.)
## asymmetric fitting range
elif np.asarray(self.radobj.fitdist).shape == (2,):
plot_fitdist = self.radobj.fitdist.copy()
plot_fitdist[~np.isfinite(plot_fitdist)] = np.asarray(ax.get_xlim())[~np.isfinite(plot_fitdist)]
ax.fill_between(plot_fitdist, *ax.get_ylim(),
facecolor = (0., 0., 1., .05),
edgecolor = 'b',
linestyle = '--',
linewidth = 1.)
## no fitting range; all data are used
else:
ax.fill_between(ax.get_xlim(), *ax.get_ylim(),
facecolor = (0., 0., 1., .05),
edgecolor = 'b',
linestyle = '--',
linewidth = 1.)
# Plot the predicted curve
ax.plot(np.linspace(ax.get_xlim()[0],ax.get_xlim()[1],500), self.radobj.profilefit(np.linspace(ax.get_xlim()[0],ax.get_xlim()[1],500)), 'b-', lw = 3., alpha = .6)
ax.text(0.03, 0.95,"{}={:.2E}\n{}={:.2f}\n{}={:.2f}".format(self.radobj.profilefit.param_names[0],self.radobj.profilefit.parameters[0],self.radobj.profilefit.param_names[1],self.radobj.profilefit.parameters[1],self.radobj.profilefit.param_names[2],self.radobj.profilefit.parameters[2]),ha='left',va='top', fontweight='bold',fontsize=20,transform=ax.transAxes)#,bbox={'facecolor':'white', 'edgecolor':'none', 'alpha':1.0, 'pad':1})
ax.text(0.97, 0.95,"{}\nFit".format(self.radobj.fitfunc.capitalize()), ha='right',va='top', color='blue',fontweight='bold', fontsize=20, transform=ax.transAxes)#,bbox={'facecolor':'white', 'edgecolor':'none', 'alpha':1.0, 'pad':1})
#ax.tick_params(labelsize=14)
elif plotFeature.lower() == 'bg':
if self.radobj.bgdist is None:
raise ValueError('No bgfit in the radfil object. Rerun fit_profile.')
#xlim=np.max(np.absolute([np.nanpercentile(self.radobj.xall[np.isfinite(self.radobj.yall)],1),np.nanpercentile(self.radobj.xall[np.isfinite(self.radobj.yall)],99)]))
xlim=np.max(self.radobj.bgdist*1.5)
if not self.radobj.fold:
ax.set_xlim(-xlim,+xlim)
else:
ax.set_xlim(0., +xlim)
ax.set_ylim(np.nanpercentile(self.radobj.yall,0)-np.abs(0.5*np.nanpercentile(self.radobj.yall,0)),np.nanpercentile(self.radobj.yall,99.9)+np.abs(0.25*np.nanpercentile(self.radobj.yall,99.9)))
ax.plot(self.radobj.xall, self.radobj.yall, 'k.', markersize = 1., alpha=styles.get_scatter_alpha(len(self.radobj.xall)))
##########
if self.radobj.binning:
plotbinx, plotbiny = np.ravel(list(zip(self.radobj.bins[:-1], self.radobj.bins[1:]))), np.ravel(list(zip(self.radobj.mastery, self.radobj.mastery)))
ax.plot(plotbinx, plotbiny,
'r-')
# Plot the range
plot_bgdist = self.radobj.bgdist.copy()
plot_bgdist[~np.isfinite(plot_bgdist)] = np.asarray(ax.get_xlim())[~np.isfinite(plot_bgdist)]
ax.fill_between(plot_bgdist, *ax.get_ylim(),
facecolor = (0., 1., 0., .05),
edgecolor = 'g',
linestyle = '--',
linewidth = 1.)
ax.fill_between(-plot_bgdist, *ax.get_ylim(),
facecolor = (0., 1., 0., .05),
edgecolor = 'g',
linestyle = '--',
linewidth = 1.)
ax.plot(np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 500), self.radobj.bgfit(np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 500)),'g-', lw=3)
#ax.set_xticklabels([])
#ax.tick_params(labelsize=14)
xplot = self.radobj.xall
yplot = self.radobj.yall - self.radobj.bgfit(xplot)
#Add labels#
if self.radobj.bgfit.degree == 1:
ax.text(0.03, 0.95,"y=({:.2E})x+({:.2E})".format(self.radobj.bgfit.parameters[1],self.radobj.bgfit.parameters[0]),ha='left',va='top', fontweight='bold',fontsize=20, transform=ax.transAxes)#,bbox={'facecolor':'white', 'edgecolor':'none', 'alpha':1.0, 'pad':1})
elif self.radobj.bgfit.degree == 0:
ax.text(0.03, 0.95,"y=({:.2E})".format(self.radobj.bgfit.c0.value),ha='left',va='top', fontweight='bold',fontsize=20, transform=ax.transAxes)
else:
warnings.warn("Labeling BG functions of higher degrees during plotting are not supported yet.")
ax.text(0.97, 0.95,"Background\nFit", ha='right',va='top', fontweight='bold',fontsize=20, color='green',transform=ax.transAxes)#,bbox={'facecolor':'white', 'edgecolor':'none', 'alpha':1.0, 'pad':1})
else:
raise ValueError('plotFeature has to be either "model" or "bg".')
else:
raise ValueError('plotFeature has to be either "model" or "bg".')
|
gpl-3.0
|
DOV-Vlaanderen/pydov
|
tests/test_search_itp_geotechnischecodering.py
|
1
|
7392
|
"""Module grouping tests for the interpretaties search module."""
import pandas as pd
from owslib.fes import PropertyIsEqualTo
from pandas import DataFrame
from pydov.search.interpretaties import GeotechnischeCoderingSearch
from pydov.types.interpretaties import GeotechnischeCodering
from tests.abstract import AbstractTestSearch
location_md_metadata = \
'tests/data/types/interpretaties/geotechnische_codering/' \
'md_metadata.xml'
location_fc_featurecatalogue = \
'tests/data/types/interpretaties/geotechnische_codering/' \
'fc_featurecatalogue.xml'
location_wfs_describefeaturetype = \
'tests/data/types/interpretaties/geotechnische_codering/' \
'wfsdescribefeaturetype.xml'
location_wfs_getfeature = \
'tests/data/types/interpretaties/geotechnische_codering/' \
'wfsgetfeature.xml'
location_wfs_feature = \
'tests/data/types/interpretaties/geotechnische_codering/feature.xml'
location_dov_xml = \
'tests/data/types/interpretaties/geotechnische_codering' \
'/geotechnische_codering.xml'
location_xsd_base = \
'tests/data/types/interpretaties/geotechnische_codering/xsd_*.xml'
class TestGeotechnischeCoderingSearch(AbstractTestSearch):
search_instance = GeotechnischeCoderingSearch()
datatype_class = GeotechnischeCodering
valid_query_single = PropertyIsEqualTo(propertyname='Proefnummer',
literal='GEO-15/139-B1')
inexistent_field = 'onbestaand'
wfs_field = 'Proefnummer'
xml_field = 'grondsoort'
valid_returnfields = ('pkey_interpretatie',
'betrouwbaarheid_interpretatie')
valid_returnfields_subtype = (
'pkey_interpretatie', 'diepte_laag_van', 'diepte_laag_tot')
valid_returnfields_extra = ('pkey_interpretatie', 'gemeente')
df_default_columns = ['pkey_interpretatie', 'pkey_boring',
'betrouwbaarheid_interpretatie', 'x', 'y',
'start_interpretatie_mtaw',
'diepte_laag_van', 'diepte_laag_tot',
'hoofdnaam1_grondsoort', 'hoofdnaam2_grondsoort',
'bijmenging1_plaatselijk', 'bijmenging1_hoeveelheid',
'bijmenging1_grondsoort',
'bijmenging2_plaatselijk', 'bijmenging2_hoeveelheid',
'bijmenging2_grondsoort',
'bijmenging3_plaatselijk', 'bijmenging3_hoeveelheid',
'bijmenging3_grondsoort']
def test_search_nan(self, mp_wfs, mp_get_schema,
mp_remote_describefeaturetype, mp_remote_md,
mp_remote_fc, mp_remote_wfs_feature, mp_dov_xml):
"""Test the search method with only the query parameter.
Test whether the result is correct.
Parameters
----------
mp_wfs : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
mp_get_schema : pytest.fixture
Monkeypatch the call to a remote OWSLib schema.
mp_remote_describefeaturetype : pytest.fixture
Monkeypatch the call to a remote DescribeFeatureType.
mp_remote_md : pytest.fixture
Monkeypatch the call to get the remote metadata.
mp_remote_fc : pytest.fixture
Monkeypatch the call to get the remote feature catalogue.
mp_remote_wfs_feature : pytest.fixture
Monkeypatch the call to get WFS features.
mp_dov_xml : pytest.fixture
Monkeypatch the call to get the remote XML data.
"""
self.search_instance.search(
query=self.valid_query_single)
def test_search_customreturnfields(self, mp_get_schema,
mp_remote_describefeaturetype,
mp_remote_wfs_feature, mp_dov_xml):
"""Test the search method with custom return fields.
Test whether the output dataframe is correct.
Parameters
----------
mp_get_schema : pytest.fixture
Monkeypatch the call to a remote OWSLib schema.
mp_remote_describefeaturetype : pytest.fixture
            Monkeypatch the call to a remote DescribeFeatureType.
mp_remote_wfs_feature : pytest.fixture
Monkeypatch the call to get WFS features.
mp_dov_xml : pytest.fixture
Monkeypatch the call to get the remote XML data.
"""
df = self.search_instance.search(
query=self.valid_query_single,
return_fields=('pkey_interpretatie', 'pkey_boring'))
assert isinstance(df, DataFrame)
assert list(df) == ['pkey_interpretatie', 'pkey_boring']
assert not pd.isnull(df.pkey_boring[0])
def test_search_xml_resolve(self, mp_get_schema,
mp_remote_describefeaturetype,
mp_remote_wfs_feature, mp_dov_xml):
"""Test the search method with return fields from XML but not from a
subtype.
Test whether the output dataframe contains the resolved XML data.
Parameters
----------
mp_get_schema : pytest.fixture
Monkeypatch the call to a remote OWSLib schema.
mp_remote_describefeaturetype : pytest.fixture
Monkeypatch the call to a remote DescribeFeatureType.
mp_remote_wfs_feature : pytest.fixture
Monkeypatch the call to get WFS features.
mp_dov_xml : pytest.fixture
Monkeypatch the call to get the remote XML data.
"""
df = self.search_instance.search(
query=self.valid_query_single,
return_fields=('pkey_interpretatie', 'diepte_laag_tot'))
assert df.diepte_laag_tot[0] == 2.0
def test_search_multiple_return(self, mp_get_schema,
mp_remote_describefeaturetype,
mp_remote_wfs_feature, mp_dov_xml):
"""Test the search method returning multiple (sub)elements of the same subject.
Test whether the output dataframe contains the resolved XML data.
Parameters
----------
mp_get_schema : pytest.fixture
Monkeypatch the call to a remote OWSLib schema.
mp_remote_describefeaturetype : pytest.fixture
Monkeypatch the call to a remote DescribeFeatureType.
mp_remote_wfs_feature : pytest.fixture
Monkeypatch the call to get WFS features.
mp_dov_xml : pytest.fixture
Monkeypatch the call to get the remote XML data.
"""
df = self.search_instance.search(
query=self.valid_query_single,
return_fields=('pkey_interpretatie',
'hoofdnaam1_grondsoort',
'hoofdnaam2_grondsoort',
'bijmenging1_grondsoort',
'bijmenging1_hoeveelheid',
'bijmenging1_plaatselijk'
))
assert df.hoofdnaam1_grondsoort[0] == 'FZ'
assert df.hoofdnaam2_grondsoort[0] == 'LE'
assert df.bijmenging1_grondsoort[0] == 'SN'
assert df.bijmenging1_hoeveelheid[0] == 'N'
# mind that the column below is of dtype 'object'
assert df.bijmenging1_plaatselijk[0] is False
|
mit
|
kjford/mlprojects
|
sparseAutoCoder.py
|
1
|
8192
|
# Neural network implementation of sparse encoder
import numpy as np
import scipy.io
import scipy.optimize as optm
import matplotlib.pyplot as plt
# functions:
def loadImagePatches(imfile='testdata/IMAGES.mat',
imvar='IMAGES',patchsize=8,npatches=10000,edgebuff=5,scale0to1=True):
# open .mat file containing images in a r x c x num images array
# load patches that are patchsize x patchsize
# normalize scale to 0 to 1 values
imgdict = scipy.io.loadmat(imfile)
imgarray = imgdict[imvar]
# get dimentions
r = imgarray.shape[0] - 2*edgebuff - patchsize
c = imgarray.shape[1] - 2*edgebuff - patchsize
nimg = imgarray.shape[2]
# allocate random numbers and patches arrays
patches = np.zeros([patchsize**2,npatches])
randrow = np.random.randint(r,size=npatches) + edgebuff
randcol = np.random.randint(c,size=npatches) + edgebuff
randimg = np.random.randint(nimg,size=npatches)
for i in range(npatches):
r1 = randrow[i]
r2 = r1+patchsize
c1 = randcol[i]
c2 = c1 + patchsize
imi = randimg[i]
patchi = imgarray[r1:r2,c1:c2,imi]
patches[:,i] = patchi.reshape(1,patchsize**2)
# normalize
# subtract mean and scale by 3 stdev's
patches -= patches.mean(0)
pstd = patches.std() * 3
patches = np.maximum(np.minimum(patches, pstd),-pstd) / pstd
if scale0to1:
# Rescale from [-1,1] to [0.1,0.9]
patches = (patches+1) * 0.4 + 0.1
return patches
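# e.g. loadImagePatches(patchsize=8, npatches=10000) returns an array of shape
# (64, 10000): one flattened 8x8 patch per column, rescaled to roughly [0.1, 0.9].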
def squareImgPlot(I):
# show n square images in a L x M array as single large panel image
# where each image is L**0.5 x L**0.5 pixels
# plotted image is M**0.5
I = I - np.mean(I)
(L, M)=I.shape
sz=int(np.sqrt(L))
buf=1
if np.floor(np.sqrt(M))**2 != M :
n=int(np.ceil(np.sqrt(M)))
while M % n !=0 and n<1.2*np.sqrt(M): n+=1
m=int(np.ceil(M/n))
else:
n=int(np.sqrt(M))
m=n
a=-np.ones([buf+m*(sz+buf)-1,buf+n*(sz+buf)-1])
k=0
for i in range(m):
for j in range(n):
            if k>=M:
continue
clim=np.max(np.abs(I[:,k]))
r1=buf+i*(sz+buf)
r2=r1+sz
c1=buf+j*(sz+buf)
c2=c1+sz
a[r1:r2,c1:c2]=I[:,k].reshape(sz,sz)/clim
k+=1
h = plt.imshow(a,cmap='gray',interpolation='none',vmin=-1,vmax=1)
def initWeights(layervec,usebias=True):
# initialize weights to each layer of network between -r and r
# layervec is array with size of input layer, each hidden layer, and output layer
# outputs initialized weights rolled into a single vector
# option to use a bias input to each layer
r = np.sqrt(6) / np.sqrt(np.sum(layervec[1:]))
inweights = layervec[:-1]
nunits = layervec[1:]
totalW=np.multiply(inweights,nunits).sum()
W=np.random.rand(totalW)*2*r-r
if usebias:
W=np.append(W,np.zeros(sum(nunits)))
return W
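# Quick sanity check of the rolled parameter size (illustrative):
# >>> initWeights([64, 25, 64]).size   # 64*25 + 25*64 weights plus 25+64 biases
# 3289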
def numericalGradient(J,theta,e=1e-4):
# compute numerical gradient as slope of J at theta values
# J is a function handle that returns a cost value (and probably gradient)
perturb = np.zeros(np.size(theta))
numgrad = np.zeros(np.size(theta))
for p in range(np.size(theta)):
perturb[p] = e
loss1 = J(theta - perturb)
loss2 = J(theta + perturb)
# Compute Numerical Gradient
numgrad[p] = (loss2[0] - loss1[0]) / (2*e)
perturb[p] = 0
return numgrad
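# Hypothetical gradient check of sparseNNCost on a tiny random problem; the
# numerical and analytic gradients should agree to several decimal places:
# >>> Xs = np.random.rand(16, 5)
# >>> arch = [16, 4, 16]
# >>> w0 = initWeights(arch)
# >>> J = lambda th: sparseNNCost(Xs, Xs, th, arch)
# >>> np.max(np.abs(numericalGradient(J, w0) - J(w0)[1]))   # expect a very small number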
def sparseNNCost(X,Y,theta,layervec,lam=0.0001,sparsityParam=0.01,beta=3,costtype='ss',outtype='sig'):
# compute the cost and gradient of neural network
# X is ndarray of n features by m examples
# Y is ndarray of output layer size by m examples
# note: for a sparse autoencoder X=Y
# theta is a rolled vector of parameters
# layervec is an list with the architecture of the network
# lam is the regularization cost parameter
# sparsityParam is the target sparsity value
# beta is the sparsity cost parameter
# defaults to a sum of squares cost (ss) of 1/m*(h(x)-y)**2
# optional log likelihood cost (costtype='ll')
# Defaults to a sigmoid (sig) output layer
# optional linear (outtype='linear') output layer
# get number of examples and number of layers
m=X.shape[1]
inweights = layervec[:-1]
nunits = layervec[1:]
totalW=np.multiply(inweights,nunits).sum()
    if theta.size>totalW:
        usebias=1
        B=[]
        bcount=0
    else:
        usebias=0
# perform forward pass through layers
W=[]
A=[X]
wcount=0
sigmoid = lambda x: (1+np.exp(-x))**-1
for i in range(len(nunits)):
nwi=inweights[i]
nui=nunits[i]
wi=theta[wcount:wcount+nwi*nui].reshape(nwi,nui)
W.append(wi)
a = np.dot(wi.T,A[i])
if usebias:
bi=theta[totalW+bcount:totalW+bcount+nui]
a+=bi.reshape(nui,1)
B.append(bi)
bcount+=nui
wcount+=nwi*nui
        if outtype=='linear' and i==len(nunits)-1:
A.append(a)
else:
A.append(sigmoid(a))
# compute error
if costtype=='ss':
errcost = ((A[-1] - Y)**2).sum()/(2.0*m)
elif costtype == 'll':
        errcost = (-Y*np.log(A[-1]) - (1-Y)*np.log(1-A[-1])).sum()
else:
print('Error type not recognized. Using sum of squares error\n')
errcost = ((A[-1] - Y)**2).sum()/(2.0*m)
costtype='ss'
# compute regularization cost
regcost = 0.5 * lam * (theta[:totalW]**2).sum()
# sparsity cost using KL divergence
# for now only assessed on first hidden layer
pj = (1.0/m)*A[1].sum(axis=1)
p=sparsityParam
KLdiv=p*np.log(p/pj) + (1-p)*np.log((1-p)/(1-pj))
sparcost = beta * KLdiv.sum()
# add up costs
cost = errcost + regcost + sparcost
#print(cost)
# perform backpropagation
if costtype=='ss':
if outtype=='sig':
errout = -(Y-A[-1])*A[-1]*(1-A[-1]) # vis x m
else:
errout = -(Y-A[-1])
else:
if outtype=='sig':
errout = -(Y-A[-1])
else:
print('Log-likelihood error with linear outputs is not valid. Using sigmoid.')
            outtype='sig'
errout = -(Y-A[-1])
# go backward through hidden layers
layercount = range(len(A))
revlayer = layercount[::-1][1:] #reversed count less last layer
layererr = [errout,]
Wgrad = W[:]
if usebias:
Bgrad = B[:]
for i in revlayer:
# err in layer is:
# (weights transpose * err in layer+1) element wise *
# deriv of layer activation wrt activation fxn (sigmoid)
# get outgoing weights
wi=W[i]
# err from layer n+1
erri=layererr[-1]
# activation of layer i
ai=A[i]
derivi=ai*(1-ai)
# if second layer then add sparsity err
if i==1:
# use pj (sparsity of layer 2 averaged over m samples (size l2)
            KLderiv = -(p/pj) + (1-p)/(1-pj)
# need to make l2 x 1 to add to err of weights
sparerr = beta * KLderiv.reshape(KLderiv.size,1)
layererr.append((np.dot(wi,erri)+sparerr) * derivi)
elif i>1:
layererr.append(np.dot(wi,erri) * derivi)
Wgrad[i] = np.dot(ai,erri.T)/m + lam * wi
if usebias:
Bgrad[i] = (erri.sum(axis=1))/m
# string together gradients
thetagrad=theta*1.
wcount=0
bcount=0
for i in range(len(Wgrad)):
nw=Wgrad[i].size
thetagrad[wcount:nw+wcount]=Wgrad[i].reshape(nw)
wcount+=nw
if usebias:
nb=Bgrad[i].size
thetagrad[totalW+bcount:totalW+bcount+nb]=Bgrad[i].reshape(nb)
bcount+=nb
return(cost,thetagrad)
if __name__ == "__main__":
# run with defaults
p=loadImagePatches()
l=[64,25,64]
w0=initWeights(l)
theta,fcost,fl=optm.fmin_l_bfgs_b(lambda x: sparseNNCost(p,p,x,l),w0)
squareImgPlot(theta[:(64*25)].reshape(64,25))
plt.savefig('W1.eps')
|
gpl-3.0
|
brandonckelly/bck_stats
|
bck_stats/super_pca.py
|
2
|
11974
|
__author__ = 'brandonkelly'
import numpy as np
from sklearn import cross_validation, metrics
from sklearn.decomposition import PCA
import multiprocessing
import copy
import matplotlib.pyplot as plt
class SupervisedPCABase(object):
def __init__(self, regressor, max_components=None, n_components=1, whiten=True):
"""
Base class for performing supervised principal component regression. This is useful for cases where the number
of inputs (features) is greater than the number of data points.
@param regressor: The object that will perform the regression. The following members must be defined for this
object:
regressor.fit(X, y) : Fits the regression model y = f(X).
regressor.predict(X) : Compute the prediction y = f(X).
regressor.coef_score_ : The score of each parameter, used for ranking the most important features when
computing the reduced feature space. In general this will be the absolute value of
the coefficient value divided by its standard error. Note that this should *not*
include the intercept.
@param max_components: Maximum number of components to search over. The default is p.
@param n_components: The number of reduced data matrix PCA components to use in the regression.
@param whiten: Remove differences in variance among the components, i.e., principal components will have unit
variance
"""
self.regressor = regressor
self.max_components = max_components
self.pca_object = PCA(n_components=n_components, whiten=whiten)
self.n_components = n_components
self.whiten = whiten
self.n_reduced = 0
self.sort_idx = np.zeros(1)
def _compute_stnd_coefs(self, X, y):
"""
Compute the standardized regression coefficients, up to a common scaling factor.
@param X: The matrix of inputs, shape (n,p).
@param y: The array of response values, size n.
@return: The standardized regression coefficients, size p.
"""
p = X.shape[1]
scoefs = np.zeros(p)
for j in xrange(p):
thisX = X[:, j]
self.regressor.fit(thisX[:, np.newaxis], y)
scoefs[j] = self.regressor.coef_score_
return scoefs
def _get_reduced_features(self, X, coefs, pmax):
"""
Return the data projected onto the first n_components principal components computed using the reduced feature
space.
@param X: The array of inputs, shape (n, p).
@param coefs: The array of standardized coefficients, size p.
@param pmax: The maximum number of features to use in the reduced feature space PCA.
@return: The data projected onto the reduced feature space PCA, shape (n, self.n_components).
"""
sort_idx = np.argsort(coefs)[::-1]
sort_idx = sort_idx[:pmax]
self.pca_object.fit(X[:, sort_idx])
X_reduced = self.pca_object.transform(X[:, sort_idx])
return X_reduced, sort_idx
def fit(self, X, y, n_reduced):
"""
Perform the regression using the first self.n_components principal components from the reduced feature space.
Note that this will call self.regressor.fit(X,y) to perform the regression.
@param X: The array of inputs, shape (n, p).
@param y: The array of response values, size n.
@param n_reduced: The number of features to use in the reduced feature space.
"""
scoefs = self._compute_stnd_coefs(X, y)
X_reduced, sort_idx = self._get_reduced_features(X, scoefs, n_reduced)
self.sort_idx = sort_idx
self.regressor.fit(X_reduced, y)
def predict(self, X):
"""
Predict the value y = f(X) based on the PCA using the reduced feature space, based on the most recent call to
self.fit(X, y, n_reduced).
@param X: The array of inputs, shape (n, p).
@return: The predicted values of the response.
"""
X_reduced = self.pca_object.transform(X[:, self.sort_idx])
y_predict = self.regressor.predict(X_reduced)
return y_predict
def launch_coef_scores(args):
"""
Wrapper to compute the standardized scores of the regression coefficients, used when computing the number of
features in the reduced parameter set.
@param args: Tuple containing the instance of SupervisedPCABase, feature matrix and response array.
@return: The standardized scores of the coefficients.
"""
spca, X, y = args
scoefs = spca._compute_stnd_coefs(X, y)
return scoefs
def compute_cv_prediction(args):
"""
Internal method to get predictions based on supervised PCA regression for each cross-validation fold. Need this
format in order to compute the predictions for the CV folds in parallel.
"""
spca, X_train, y_train, X_test, n_reduced, scoef = args
SPCA = SupervisedPCABase(copy.deepcopy(spca.regressor), spca.max_components, spca.n_components, spca.whiten)
X_reduced, sort_idx = SPCA._get_reduced_features(X_train, scoef, n_reduced)
SPCA.regressor.fit(X_reduced, y_train)
X_test_reduced = SPCA.pca_object.transform(X_test[:, sort_idx])
y_predict = SPCA.regressor.predict(X_test_reduced)
return y_predict
class SupervisedPCA(SupervisedPCABase):
def __init__(self, regressor, max_components=None, n_components=1, whiten=True, n_jobs=1):
"""
Class for performing supervised principal component regression. This is useful for cases where the number of
inputs (features) is greater than the number of data points.
@param regressor: The object that will perform the regression. The following members must be defined for this
object:
regressor.fit(X, y) : Fits the regression model y = f(X).
regressor.predict(X) : Compute the prediction y = f(X).
regressor.coef_score_ : The score of each parameter, used for ranking the most important features when
computing the reduced feature space. In general this will be the absolute value of
the coefficient value divided by its standard error. Note that this should *not*
include the intercept.
@param max_components: Maximum number of components to search over. The default is p.
@param n_components: The number of reduced data matrix PCA components to use in the regression.
@param whiten: Remove differences in variance among the components, i.e., principal components will have unit
variance
@param n_jobs: The number of threads to use for parallel processing. If n_jobs = -1 then use maximum number
available.
"""
super(SupervisedPCA, self).__init__(regressor, max_components, n_components, whiten)
if n_jobs < 0:
n_jobs = multiprocessing.cpu_count()
self.n_jobs = n_jobs
def _compute_cv_prediction(self, args):
"""
Internal method to get predictions based on supervised PCA regression for each cross-validation fold. Need this
format in order to compute the predictions for the CV folds in parallel.
"""
X_train, y_train, X_test, n_reduced, scoef = args
SPCA = SupervisedPCABase(copy.deepcopy(self.regressor), self.max_components, self.n_components, self.whiten)
X_reduced, sort_idx = SPCA._get_reduced_features(X_train, scoef, n_reduced)
SPCA.regressor.fit(X_reduced, y_train)
X_test_reduced = SPCA.pca_object.transform(X_test[:, sort_idx])
y_predict = SPCA.regressor.predict(X_test_reduced)
return y_predict
def _launch_coef_scores(self, args):
"""
Wrapper to compute the standardized scores of the regression coefficients, used when computing the number of
features in the reduced parameter set.
@param args: Tuple containing the feature matrix and response array.
@return: The standardized scores of the coefficients.
"""
X, y = args
scoefs = self._compute_stnd_coefs(X, y)
return scoefs
def choose_nreduced(self, X, y, lossfunc=None, cv=None, verbose=False, cvplot=False):
"""
Choose the number of features to use in the reduced feature set by minimizing the cross-validation error.
@param X: The feature matrix, shape (n,p)
@param y: The vector of response values, size n.
@param lossfunc: The loss function to use for the CV error, callable. The default is mean squared error.
@param cv: Number of CV folds (if int), or cross-validation iterator.
@param verbose: Print helpful information.
@param cvplot: Plot the CV error as a function of the number features in the reduced feature set.
@return: The number of features in the reduced feature set that minimized the CV error.
"""
if self.n_jobs > 1:
pool = multiprocessing.Pool(self.n_jobs)
pool.map(int, range(self.n_jobs)) # Trick to "warm up" the Pool
# setup cross-validation iterator
if cv is None:
    cv = 8
if isinstance(cv, int):
    K_folds = cv
    cv = cross_validation.KFold(y.size, n_folds=K_folds)
else:
    # a cross-validation iterator was supplied; keep it as-is
    K_folds = len(cv)
if lossfunc is None:
lossfunc = metrics.mean_squared_error
if self.max_components is None:
self.max_components = X.shape[1]
if verbose:
print 'Searching over', self.max_components, ' features to include in the reduced feature space.'
print 'Computing univariate regression tests statistics for each feature...'
# first compute coefficients scores
sargs = []
for train_idx, test_idx in cv:
if self.n_jobs == 1:
sargs.append((X[train_idx, :], y[train_idx]))
else:
sargs.append((self, X[train_idx, :], y[train_idx]))
if self.n_jobs == 1:
scoefs = map(self._launch_coef_scores, sargs)
else:
scoefs = pool.map(launch_coef_scores, sargs)
# find optimal number of features to use in PCA on reduced feature set, do this by minimizing cross-validation
# error on a grid.
cverrors = np.zeros(self.max_components)
if verbose:
print 'Computing cross-validation errors on a grid of up to', self.max_components, 'features used in the', \
'reduced feature space...'
for k in xrange(self.max_components):
cverror_args = []
ytest = []
fold_idx = 0
for train_idx, test_idx in cv:
if self.n_jobs == 1:
cverror_args.append((X[train_idx, :], y[train_idx], X[test_idx, :], k + 1, scoefs[fold_idx]))
else:
cverror_args.append((self, X[train_idx, :], y[train_idx], X[test_idx, :], k + 1, scoefs[fold_idx]))
ytest.append(y[test_idx])
fold_idx += 1
if self.n_jobs == 1:
ypredictions = map(self._compute_cv_prediction, cverror_args)
else:
ypredictions = pool.map(compute_cv_prediction, cverror_args)
cverror_k = 0.0
for yt, yp in zip(ytest, ypredictions):
cverror_k += lossfunc(yt, yp) / K_folds
cverrors[k] = cverror_k
if cvplot:
plt.plot(np.arange(1, self.max_components + 1), cverrors)
plt.xlabel('# of features in reduced set')
plt.ylabel('CV Loss Function')
plt.show()
n_reduced = cverrors.argmin() + 1
if verbose:
print 'Selected', n_reduced, 'features to use in the reduced feature set.'
return n_reduced
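if __name__ == "__main__":
    # -----------------------------------------------------------------------
    # Hedged usage sketch (not part of the original module). The wrapper and
    # the synthetic data below are assumptions made purely for illustration;
    # the wrapper shows the regressor contract documented above: fit(),
    # predict() and a coef_score_ attribute used to rank individual features.
    # -----------------------------------------------------------------------
    from sklearn.linear_model import LinearRegression

    class ScoredLinearRegression(LinearRegression):
        def fit(self, X, y):
            super(ScoredLinearRegression, self).fit(X, y)
            # crude score: absolute coefficient value; a real application
            # would divide by the coefficient's standard error
            self.coef_score_ = float(np.abs(np.ravel(self.coef_))[0])
            return self

    rng = np.random.RandomState(0)
    X = rng.randn(100, 50)
    y = X[:, :3].sum(axis=1) + 0.1 * rng.randn(100)

    spca = SupervisedPCA(ScoredLinearRegression(), max_components=10, n_jobs=1)
    n_reduced = spca.choose_nreduced(X, y)
    spca.fit(X, y, n_reduced)
    print 'Chose', n_reduced, 'features; training MSE:', \
        metrics.mean_squared_error(y, spca.predict(X))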
|
mit
|
ashwyn/eden-message_parser
|
modules/tests/smoke/broken_links.py
|
1
|
11841
|
from time import time
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
import sys
from tests.web2unittest import Web2UnitTest
from gluon import current
try:
from twill import get_browser
from twill import set_output
from twill.browser import *
except ImportError:
raise NameError("Twill not installed")
try:
import mechanize
# from mechanize import BrowserStateError
# from mechanize import ControlNotFoundError
except ImportError:
raise NameError("Mechanize not installed")
class BrokenLinkTest(Web2UnitTest):
""" Selenium Unit Test """
def __init__(self):
Web2UnitTest.__init__(self)
self.b = get_browser()
self.b_data = StringIO()
set_output(self.b_data)
self.clearRecord()
# This string must exist in the URL for it to be followed
# Useful to avoid going to linked sites
self.homeURL = self.url
# Link used to identify a URL to a ticket
self.url_ticket = "/admin/default/ticket/"
# Tuple of strings that if in the URL will be ignored
# Useful to avoid dynamic URLs that trigger the same functionality
self.include_ignore = ("_language=",
"logout",
"appadmin",
"admin"
)
# tuple of strings that should be removed from the URL before storing
# Typically this will be some variables passed in via the URL
self.strip_url = ("?_next=",
)
self.maxDepth = 16 # sanity check
self.setUser("[email protected]/eden")
self.linkDepth = []
def clearRecord(self):
# list of links that return a http_code other than 200
# with the key being the URL and the value the http code
self.brokenLinks = dict()
# List of links visited (key) with the parent
self.urlParentList = dict()
# List of links visited (key) with the depth
self.urlList = dict()
# List of urls for each model
self.model_url = dict()
self.totalLinks = 0
def setDepth(self, depth):
self.maxDepth = depth
def setUser(self, user):
self.credentials = user.split(",")
def login(self, credentials):
if credentials == "UNAUTHENTICATED":
url = "%s/default/user/logout" % self.homeURL
self.b.go(url)
return True
try:
(self.user, self.password) = credentials.split("/",1)
except:
msg = "Unable to split %s into a user name and password" % user
self.reporter(msg)
return False
url = "%s/default/user/login" % self.homeURL
self.b.go(url)
forms = self.b.get_all_forms()
for form in forms:
try:
if form["_formname"] == "login":
self.b._browser.form = form
form["email"] = self.user
form["password"] = self.password
self.b.submit("Login")
# If login is successful then should be redirected to the homepage
return self.b.get_url()[len(self.homeURL):] == "/default/index"
except:
# This should be a mechanize.ControlNotFoundError, but
# for some unknown reason that isn't caught on Windows or Mac
pass
return False
def runTest(self):
"""
Test to find all exposed links and check the http code returned.
This test doesn't run any javascript so some false positives
will be found.
The test can also display a histogram depicting the number of
links found at each depth.
"""
for user in self.credentials:
self.clearRecord()
if self.login(user):
self.visitLinks()
def visitLinks(self):
url = self.homeURL
to_visit = [url]
start = time()
for depth in range(self.maxDepth):
if len(to_visit) == 0:
break
self.linkDepth.append(len(to_visit))
self.totalLinks += len(to_visit)
visit_start = time()
url_visited = "%d urls" % len(to_visit)
to_visit = self.visit(to_visit, depth)
msg = "%.2d Visited %s in %.3f seconds, %d more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
self.reporter(msg)
if self.config.verbose == 2:
if self.stdout.isatty(): # terminal should support colour
msg = "%.2d Visited \033[1;32m%s\033[0m in %.3f seconds, \033[1;31m%d\033[0m more urls found" % (depth, url_visited, time()-visit_start, len(to_visit))
print >> self.stdout, msg
if len(to_visit) > 0:
self.linkDepth.append(len(to_visit))
finish = time()
self.report()
self.reporter("Finished took %.3f seconds" % (finish - start))
self.report_link_depth()
# self.report_model_url()
def add_to_model(self, url, depth, parent):
start = url.find(self.homeURL) + len(self.homeURL)
end = url.find("/",start)
model = url[start:end]
if model in self.model_url:
self.model_url[model].append((url, depth, parent))
else:
self.model_url[model] = [(url, depth, parent)]
def visit(self, url_list, depth):
repr_list = [".pdf", ".xls", ".rss", ".kml"]
to_visit = []
for visited_url in url_list:
index_url = visited_url[len(self.homeURL):]
# Find out if the page can be visited
open_novisit = False
for repr in repr_list:
if repr in index_url:
open_novisit = True
break
try:
if open_novisit:
self.b._journey("open_novisit", visited_url)
http_code = self.b.get_code()
if http_code != 200: # an error situation
self.b.go(visited_url)
http_code = self.b.get_code()
else:
self.b.go(visited_url)
http_code = self.b.get_code()
except Exception as e:
import traceback
print traceback.format_exc()
self.brokenLinks[index_url] = ("-","Exception raised")
continue
http_code = self.b.get_code()
if http_code != 200:
url = "<a href=%s target=\"_blank\">URL</a>" % (visited_url)
self.brokenLinks[index_url] = (http_code,url)
elif open_novisit:
continue
links = []
try:
if self.b._browser.viewing_html():
links = self.b._browser.links()
else:
continue
except Exception as e:
import traceback
print traceback.format_exc()
self.brokenLinks[index_url] = ("-","Exception raised")
continue
for link in (links):
url = link.absolute_url
if url.find(self.url_ticket) != -1:
# A ticket was raised so...
# capture the details and add to brokenLinks
if current.test_config.html:
ticket = "<a href=%s target=\"_blank\">Ticket</a> at <a href=%s target=\"_blank\">URL</a>" % (url,visited_url)
else:
ticket = "Ticket: %s" % url
self.brokenLinks[index_url] = (http_code,ticket)
break # no need to check any other links on this page
if url.find(self.homeURL) == -1:
continue
ignore_link = False
for ignore in self.include_ignore:
if url.find(ignore) != -1:
ignore_link = True
break
if ignore_link:
continue
for strip in self.strip_url:
location = url.find(strip)
if location != -1:
url = url[0:location]
if url not in self.urlList:
self.urlList[url] = depth
self.urlParentList[url[len(self.homeURL):]] = visited_url
self.add_to_model(url, depth, visited_url)
if url not in to_visit:
to_visit.append(url)
return to_visit
def report(self):
# print "Visited pages"
# n = 1
# for (url, depth) in self.urlList.items():
# print "%d. depth %d %s" % (n, depth, url,)
# n += 1
self.reporter("%d URLs visited" % self.totalLinks)
self.reporter("Broken Links")
n = 1
for (url, result) in self.brokenLinks.items():
http_code = result[0]
try:
parent = self.urlParentList[url]
if current.test_config.html:
parent = "<a href=%s target=\"_blank\">Parent</a>" % (parent)
except:
parent = "unknown"
if len(result) == 1:
self.reporter("%3d. (%s) %s called from %s" % (n,
http_code,
url,
parent
)
)
else:
self.reporter("%3d. (%s-%s) %s called from %s" % (n,
http_code,
result[1],
url,
parent
)
)
n += 1
def report_link_depth(self):
"""
Method to draw a histogram of the number of new links
discovered at each depth.
(i.e. show how many links are required to reach a link)
"""
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
from numpy import arange
except ImportError:
return
self.reporter("Analysis of link depth")
fig = Figure(figsize=(4, 2.5))
# Draw a histogram
width = 0.9
rect = [0.12, 0.08, 0.9, 0.85]
ax = fig.add_axes(rect)
left = arange(len(self.linkDepth))
plot = ax.bar(left, self.linkDepth, width=width)
# Add the x axis labels
ax.set_xticks(left+(width*0.5))
ax.set_xticklabels(left)
chart = StringIO()
canvas = self.FigureCanvas(fig)
canvas.print_figure(chart)
image = chart.getvalue()
import base64
base64Img = base64.b64encode(image)
image = "<img src=\"data:image/png;base64,%s\">" % base64Img
self.reporter(image)
def report_model_url(self):
print "Report breakdown by module"
for (model, value) in self.model_url.items():
print model
for ud in value:
url = ud[0]
depth = ud[1]
parent = ud[2]
tabs = "\t" * depth
print "%s %s-%s (parent url - %s)" % (tabs, depth, url, parent)
|
mit
|
glouppe/scikit-learn
|
sklearn/model_selection/_search.py
|
18
|
38851
|
"""
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin, ChangedBehaviorWarning
from ._split import check_cv
from ._validation import _fit_and_score
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..utils import check_random_state
from ..utils.fixes import sp_version
from ..utils.random import sample_without_replacement
from ..utils.validation import _num_samples, indexable
from ..utils.metaestimators import if_delegate_has_method
from ..metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
if sp_version < (0, 16):
params[k] = v.rvs()
else:
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
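# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module):
# evaluating a single grid point on one train/test split. The dataset, the
# split and the parameter values below are assumptions made for the example.
#
#   >>> import numpy as np
#   >>> from sklearn import svm, datasets
#   >>> from sklearn.metrics.scorer import check_scoring
#   >>> iris = datasets.load_iris()
#   >>> rng = np.random.RandomState(0)
#   >>> perm = rng.permutation(len(iris.target))
#   >>> train, test = perm[:100], perm[100:]
#   >>> clf = svm.SVC()
#   >>> scorer = check_scoring(clf, scoring='accuracy')
#   >>> score, params, n_test = fit_grid_point(
#   ...     iris.data, iris.target, clf, {'C': 1.0}, train, test,
#   ...     scorer, verbose=0)
# ---------------------------------------------------------------------------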
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes; in particular it
# does not copy the string for the keys on each instance.
# Deriving a namedtuple class just to introduce the __repr__ method would
# also reintroduce the __dict__ on the instance, so we tell the Python
# interpreter that this subclass uses static __slots__ instead of dynamic
# attributes. Furthermore we don't need any additional slot in the
# subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(Xt)
def _fit(self, X, y, labels, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y, labels = indexable(X, y, labels)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
n_splits = cv.get_n_splits(X, y, labels)
if self.verbose > 0 and isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv.split(X, y, labels))
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_splits):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_splits]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_splits)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, ``StratifiedKFold`` is used for classification
tasks, when ``y`` is binary or multiclass.
See the :mod:`sklearn.model_selection` module for the list of
cross-validation strategies that can be used here.
Also refer :ref:`cross-validation documentation <cross_validation>`
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None, labels=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
return self._fit(X, y, labels, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, ``StratifiedKFold`` is used for classification
tasks, when ``y`` is binary or multiclass.
See the :mod:`sklearn.model_selection` module for the list of
cross-validation strategies that can be used here.
Also refer :ref:`cross-validation documentation <cross_validation>`
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None, labels=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, labels, sampled_params)
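# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module):
# randomized search over SVC hyper-parameters. The dataset and the sampling
# distributions below are assumptions chosen for the example.
#
#   >>> from scipy.stats import expon
#   >>> from sklearn import svm, datasets
#   >>> iris = datasets.load_iris()
#   >>> param_dist = {'C': expon(scale=10), 'gamma': expon(scale=0.1)}
#   >>> search = RandomizedSearchCV(svm.SVC(), param_dist, n_iter=8,
#   ...                             random_state=0)
#   >>> search.fit(iris.data, iris.target)        # doctest: +ELLIPSIS
#   RandomizedSearchCV(...)
#   >>> sorted(search.best_params_.keys())
#   ['C', 'gamma']
# ---------------------------------------------------------------------------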
|
bsd-3-clause
|
PrashntS/scikit-learn
|
examples/manifold/plot_manifold_sphere.py
|
258
|
5101
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seeks an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
GuessWhoSamFoo/pandas
|
pandas/tests/indexes/period/test_indexing.py
|
1
|
25104
|
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
from pandas.compat import lrange
import pandas as pd
from pandas import (
DatetimeIndex, Period, PeriodIndex, Series, notna, period_range)
from pandas.util import testing as tm
class TestGetItem(object):
def test_ellipsis(self):
# GH#21282
idx = period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx[0]
assert result == pd.Period('2011-01-01', freq='D')
result = idx[-1]
assert result == pd.Period('2011-01-31', freq='D')
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15',
'2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
def test_getitem_index(self):
idx = period_range('2007-01', periods=10, freq='M', name='x')
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(['2007-02', '2007-04', '2007-06'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False,
True, True, False, False, False]]
exp = pd.PeriodIndex(['2007-01', '2007-02', '2007-06', '2007-07'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
pytest.raises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
assert (result.index.year == 2008).all()
result = ts['2008':'2009']
assert len(result) == 24
result = ts['2008-1':'2009-12']
assert len(result) == 24
result = ts['2008Q1':'2009Q4']
assert len(result) == 24
result = ts[:'2009']
assert len(result) == 36
result = ts['2009':]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice('2008', '2009')]
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert idx[0] == pd.Period('2011-01', freq='M')
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert (s[pd.Period('2011-01', freq='M')] ==
pd.Period('2011-01', freq='M'))
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start='2012-01-01', periods=10, freq='D')
ts = Series(lrange(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period('2012-01-02', freq='D')]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start='2013/01/01 09:00:00', freq='S',
periods=4000)
pidx = period_range(start='2013/01/01 09:00:00', freq='S',
periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
tm.assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start='2013/01/01', freq='D', periods=400)
pidx = period_range(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01'], s[0:31])
tm.assert_series_equal(s['2013/02'], s[31:59])
tm.assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with pytest.raises(KeyError):
s[v]
class TestWhere(object):
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range('20130101', periods=5, freq='D')
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq='D')
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range('20130101', periods=5, freq='D')
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
class TestTake(object):
def test_take(self):
# GH#10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period('2011-01-01', freq='D')
result = idx.take([5])
assert result == pd.Period('2011-01-06', freq='D')
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == 'D'
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05'], freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05',
'2011-01-02'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
def test_take_misc(self):
index = period_range(start='1/1/10', end='12/31/12', freq='D',
name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx', freq='D')
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
class TestIndexing(object):
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
pytest.raises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03'])
pidx = PeriodIndex(['2011-01-01', 'NaT', '2011-01-03'], freq='M')
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float('nan')) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc('foo')
pytest.raises(KeyError, idx0.get_loc, 1.1)
pytest.raises(TypeError, idx0.get_loc, idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc('foo')
pytest.raises(KeyError, idx1.get_loc, 1.1)
pytest.raises(TypeError, idx1.get_loc, idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
assert idx_dec0.is_monotonic_decreasing is True
assert idx_dec1.is_monotonic_decreasing is True
assert idx.is_monotonic_decreasing is False
def test_is_unique(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
idx0 = pd.PeriodIndex([p0, p1, p2])
assert idx0.is_unique is True
idx1 = pd.PeriodIndex([p1, p1, p2])
assert idx1.is_unique is False
def test_contains(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
p3 = pd.Period('2017-09-04')
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
for p in ps0:
assert idx0.contains(p)
assert p in idx0
assert idx0.contains(str(p))
assert str(p) in idx0
assert idx0.contains('2017-09-01 00:00:01')
assert '2017-09-01 00:00:01' in idx0
assert idx0.contains('2017-09')
assert '2017-09' in idx0
assert not idx0.contains(p3)
assert p3 not in idx0
def test_get_value(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
idx0 = pd.PeriodIndex([p0, p1, p2])
input0 = np.array([1, 2, 3])
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
idx1 = pd.PeriodIndex([p1, p1, p2])
input1 = np.array([1, 2, 3])
expected1 = np.array([1, 2])
result1 = idx1.get_value(input1, p1)
tm.assert_numpy_array_equal(result1, expected1)
idx2 = pd.PeriodIndex([p1, p2, p1])
input2 = np.array([1, 2, 3])
expected2 = np.array([1, 3])
result2 = idx2.get_value(input2, p1)
tm.assert_numpy_array_equal(result2, expected2)
def test_get_indexer(self):
# GH 17717
p1 = pd.Period('2017-09-01')
p2 = pd.Period('2017-09-04')
p3 = pd.Period('2017-09-07')
tp0 = pd.Period('2017-08-31')
tp1 = pd.Period('2017-09-02')
tp2 = pd.Period('2017-09-05')
tp3 = pd.Period('2017-09-09')
idx = pd.PeriodIndex([p1, p2, p3])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex([tp0, tp1, tp2, tp3])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2, -1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 0, 1, 2], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 day'))
tm.assert_numpy_array_equal(res,
np.array([0, 0, 1, -1], dtype=np.intp))
def test_get_indexer_non_unique(self):
# GH 17717
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
p3 = pd.Period('2017-09-04')
p4 = pd.Period('2017-09-05')
idx1 = pd.PeriodIndex([p1, p2, p1])
idx2 = pd.PeriodIndex([p2, p1, p3, p4])
result = idx1.get_indexer_non_unique(idx2)
expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp)
expected_missing = np.array([2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(result[0], expected_indexer)
tm.assert_numpy_array_equal(result[1], expected_missing)
# TODO: This method came from test_period; de-dup with version above
def test_get_loc2(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].asfreq('H', how='start'), method) == 1
assert idx.get_loc(idx[1].to_timestamp(), method) == 1
assert idx.get_loc(idx[1].to_timestamp()
.to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
idx = pd.period_range('2000-01-01', periods=5)[::2]
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)) == 1
msg = 'unit abbreviation w/o a number'
with pytest.raises(ValueError, match=msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq=None from PeriodArray\\(freq=D\\)'
with pytest.raises(ValueError, match=msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with pytest.raises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
with pytest.raises(
ValueError,
match='list-like tolerance size must match target index size'):
idx.get_loc('2000-01-10', method='nearest',
tolerance=[pd.Timedelta('1 day').to_timedelta64(),
pd.Timedelta('1 day').to_timedelta64()])
# TODO: This method came from test_period; de-dup with version above
def test_get_indexer2(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.intp))
msg = 'Input has different freq=None from PeriodArray\\(freq=H\\)'
with pytest.raises(ValueError, match=msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
np.timedelta64(1, 'D'), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
np.timedelta64(1, 'M'), ]
with pytest.raises(
libperiod.IncompatibleFrequency,
match='Input has different freq=None from'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(np.random.randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
assert expected == result
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
tm.assert_frame_equal(df, df.loc[idx])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
tm.assert_frame_equal(df, df.loc[list(idx)])
|
bsd-3-clause
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/matplotlib/textpath.py
|
10
|
16668
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict
import six
from six.moves import zip
import warnings
import numpy as np
from matplotlib.path import Path
from matplotlib import rcParams
import matplotlib.font_manager as font_manager
from matplotlib.ft2font import KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ft2font import LOAD_TARGET_LIGHT
from matplotlib.mathtext import MathTextParser
import matplotlib.dviread as dviread
from matplotlib.font_manager import FontProperties, get_font
from matplotlib.transforms import Affine2D
from six.moves.urllib.parse import quote as urllib_quote
class TextToPath(object):
"""
    A class that converts a given text to a path using ttf fonts.
"""
FONT_SCALE = 100.
DPI = 72
def __init__(self):
"""
Initialization
"""
self.mathtext_parser = MathTextParser('path')
self.tex_font_map = None
from matplotlib.cbook import maxdict
self._ps_fontd = maxdict(50)
self._texmanager = None
self._adobe_standard_encoding = None
def _get_adobe_standard_encoding(self):
enc_name = dviread.find_tex_file('8a.enc')
enc = dviread.Encoding(enc_name)
return dict([(c, i) for i, c in enumerate(enc.encoding)])
def _get_font(self, prop):
"""
find a ttf font.
"""
fname = font_manager.findfont(prop)
font = get_font(fname)
font.set_size(self.FONT_SCALE, self.DPI)
return font
def _get_hinting_flag(self):
return LOAD_NO_HINTING
def _get_char_id(self, font, ccode):
"""
Return a unique id for the given font and character-code set.
"""
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1, 0, 0, 6)].decode('macroman')
except KeyError:
ps_name = sfnt[(3, 1, 0x0409, 6)].decode('utf-16be')
char_id = urllib_quote('%s-%x' % (ps_name, ccode))
return char_id
def _get_char_id_ps(self, font, ccode):
"""
Return a unique id for the given font and character-code set (for tex).
"""
ps_name = font.get_ps_font_info()[2]
char_id = urllib_quote('%s-%d' % (ps_name, ccode))
return char_id
def glyph_to_path(self, font, currx=0.):
"""
convert the ft2font glyph to vertices and codes.
"""
verts, codes = font.get_path()
if currx != 0.0:
verts[:, 0] += currx
return verts, codes
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=None)
return w, h, d
fontsize = prop.get_size_in_points()
scale = float(fontsize) / self.FONT_SCALE
if ismath:
prop = prop.copy()
prop.set_size(self.FONT_SCALE)
width, height, descent, trash, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width * scale, height * scale, descent * scale
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
return w * scale, h * scale, d * scale
def get_text_path(self, prop, s, ismath=False, usetex=False):
"""
convert text *s* to path (a tuple of vertices and codes for
matplotlib.path.Path).
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. Effective only if usetex == False.
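        A minimal usage sketch (illustrative only; ``text_to_path`` is the
        module-level instance created at the bottom of this file)::
            verts, codes = text_to_path.get_text_path(FontProperties(), "abc")
            path = Path(verts, codes)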
"""
if not usetex:
if not ismath:
font = self._get_font(prop)
glyph_info, glyph_map, rects = self.get_glyphs_with_font(
font, s)
else:
glyph_info, glyph_map, rects = self.get_glyphs_mathtext(
prop, s)
else:
glyph_info, glyph_map, rects = self.get_glyphs_tex(prop, s)
verts, codes = [], []
for glyph_id, xposition, yposition, scale in glyph_info:
verts1, codes1 = glyph_map[glyph_id]
if len(verts1):
verts1 = np.array(verts1) * scale + [xposition, yposition]
verts.extend(verts1)
codes.extend(codes1)
for verts1, codes1 in rects:
verts.extend(verts1)
codes.extend(codes1)
return verts, codes
def get_glyphs_with_font(self, font, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes using the
provided ttf font.
"""
# Mostly copied from backend_svg.py.
lastgind = None
currx = 0
xpositions = []
glyph_ids = []
if glyph_map is None:
glyph_map = OrderedDict()
if return_new_glyphs_only:
glyph_map_new = OrderedDict()
else:
glyph_map_new = glyph_map
# I'm not sure if I get kernings right. Needs to be verified. -JJL
for c in s:
ccode = ord(c)
gind = font.get_char_index(ccode)
if gind is None:
ccode = ord('?')
gind = 0
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
horiz_advance = (glyph.linearHoriAdvance / 65536.0)
char_id = self._get_char_id(font, ccode)
if char_id not in glyph_map:
glyph_map_new[char_id] = self.glyph_to_path(font)
currx += (kern / 64.0)
xpositions.append(currx)
glyph_ids.append(char_id)
currx += horiz_advance
lastgind = gind
ypositions = [0] * len(xpositions)
sizes = [1.] * len(xpositions)
rects = []
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, rects)
def get_glyphs_mathtext(self, prop, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes by parsing it with
mathtext.
"""
prop = prop.copy()
prop.set_size(self.FONT_SCALE)
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.DPI, prop)
if not glyph_map:
glyph_map = OrderedDict()
if return_new_glyphs_only:
glyph_map_new = OrderedDict()
else:
glyph_map_new = glyph_map
xpositions = []
ypositions = []
glyph_ids = []
sizes = []
currx, curry = 0, 0
for font, fontsize, ccode, ox, oy in glyphs:
char_id = self._get_char_id(font, ccode)
if char_id not in glyph_map:
font.clear()
font.set_size(self.FONT_SCALE, self.DPI)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
glyph_map_new[char_id] = self.glyph_to_path(font)
xpositions.append(ox)
ypositions.append(oy)
glyph_ids.append(char_id)
size = fontsize / self.FONT_SCALE
sizes.append(size)
myrects = []
for ox, oy, w, h in rects:
vert1 = [(ox, oy), (ox, oy + h), (ox + w, oy + h),
(ox + w, oy), (ox, oy), (0, 0)]
code1 = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
myrects.append((vert1, code1))
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, myrects)
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_glyphs_tex(self, prop, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes using matplotlib's usetex
mode.
"""
        # codes are mostly borrowed from the pdf backend.
texmanager = self.get_texmanager()
if self.tex_font_map is None:
self.tex_font_map = dviread.PsfontsMap(
dviread.find_tex_file('pdftex.map'))
if self._adobe_standard_encoding is None:
self._adobe_standard_encoding = self._get_adobe_standard_encoding()
fontsize = prop.get_size_in_points()
if hasattr(texmanager, "get_dvi"):
dvifilelike = texmanager.get_dvi(s, self.FONT_SCALE)
dvi = dviread.DviFromFileLike(dvifilelike, self.DPI)
else:
dvifile = texmanager.make_dvi(s, self.FONT_SCALE)
dvi = dviread.Dvi(dvifile, self.DPI)
try:
page = next(iter(dvi))
finally:
dvi.close()
if glyph_map is None:
glyph_map = OrderedDict()
if return_new_glyphs_only:
glyph_map_new = OrderedDict()
else:
glyph_map_new = glyph_map
glyph_ids, xpositions, ypositions, sizes = [], [], [], []
# Gather font information and do some setup for combining
# characters into strings.
# oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
font_and_encoding = self._ps_fontd.get(dvifont.texname)
font_bunch = self.tex_font_map[dvifont.texname]
if font_and_encoding is None:
font = get_font(font_bunch.filename)
for charmap_name, charmap_code in [("ADOBE_CUSTOM",
1094992451),
("ADOBE_STANDARD",
1094995778)]:
try:
font.select_charmap(charmap_code)
except (ValueError, RuntimeError):
pass
else:
break
else:
charmap_name = ""
warnings.warn("No supported encoding in font (%s)." %
font_bunch.filename)
if charmap_name == "ADOBE_STANDARD" and font_bunch.encoding:
enc0 = dviread.Encoding(font_bunch.encoding)
enc = dict([(i, self._adobe_standard_encoding.get(c, None))
for i, c in enumerate(enc0.encoding)])
else:
enc = dict()
self._ps_fontd[dvifont.texname] = font, enc
else:
font, enc = font_and_encoding
ft2font_flag = LOAD_TARGET_LIGHT
char_id = self._get_char_id_ps(font, glyph)
if char_id not in glyph_map:
font.clear()
font.set_size(self.FONT_SCALE, self.DPI)
if enc:
charcode = enc.get(glyph, None)
else:
charcode = glyph
if charcode is not None:
glyph0 = font.load_char(charcode, flags=ft2font_flag)
else:
warnings.warn("The glyph (%d) of font (%s) cannot be "
"converted with the encoding. Glyph may "
"be wrong" % (glyph, font_bunch.filename))
glyph0 = font.load_char(glyph, flags=ft2font_flag)
glyph_map_new[char_id] = self.glyph_to_path(font)
glyph_ids.append(char_id)
xpositions.append(x1)
ypositions.append(y1)
sizes.append(dvifont.size / self.FONT_SCALE)
myrects = []
for ox, oy, h, w in page.boxes:
vert1 = [(ox, oy), (ox + w, oy), (ox + w, oy + h),
(ox, oy + h), (ox, oy), (0, 0)]
code1 = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
myrects.append((vert1, code1))
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, myrects)
text_to_path = TextToPath()
class TextPath(Path):
"""
Create a path from the text.
"""
def __init__(self, xy, s, size=None, prop=None,
_interpolation_steps=1, usetex=False,
*kl, **kwargs):
"""
Create a path from the text. No support for TeX yet. Note that
it simply is a path, not an artist. You need to use the
PathPatch (or other artists) to draw this path onto the
canvas.
xy : position of the text.
s : text
size : font size
prop : font property
"""
if prop is None:
prop = FontProperties()
if size is None:
size = prop.get_size_in_points()
self._xy = xy
self.set_size(size)
self._cached_vertices = None
self._vertices, self._codes = self.text_get_vertices_codes(
prop, s,
usetex=usetex)
self._should_simplify = False
self._simplify_threshold = rcParams['path.simplify_threshold']
self._has_nonfinite = False
self._interpolation_steps = _interpolation_steps
def set_size(self, size):
"""
set the size of the text
"""
self._size = size
self._invalid = True
def get_size(self):
"""
get the size of the text
"""
return self._size
def _get_vertices(self):
"""
Return the cached path after updating it if necessary.
"""
self._revalidate_path()
return self._cached_vertices
def _get_codes(self):
"""
Return the codes
"""
return self._codes
vertices = property(_get_vertices)
codes = property(_get_codes)
def _revalidate_path(self):
"""
update the path if necessary.
        The path for the text is initially created with the font size
        of FONT_SCALE, and this path is rescaled to the requested size
        when necessary.
"""
if (self._invalid or
(self._cached_vertices is None)):
tr = Affine2D().scale(
self._size / text_to_path.FONT_SCALE,
self._size / text_to_path.FONT_SCALE).translate(*self._xy)
self._cached_vertices = tr.transform(self._vertices)
self._invalid = False
def is_math_text(self, s):
"""
Returns True if the given string *s* contains any mathtext.
"""
# copied from Text.is_math_text -JJL
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
def text_get_vertices_codes(self, prop, s, usetex):
"""
convert the string *s* to vertices and codes using the
provided font property *prop*. Mostly copied from
backend_svg.py.
"""
if usetex:
verts, codes = text_to_path.get_text_path(prop, s, usetex=True)
else:
clean_line, ismath = self.is_math_text(s)
verts, codes = text_to_path.get_text_path(prop, clean_line,
ismath=ismath)
return verts, codes
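# A minimal usage sketch (illustrative, not part of the library API): TextPath
# only builds a Path; an artist such as matplotlib.patches.PathPatch is needed
# to actually draw it. The figure limits and font choice below are arbitrary
# assumptions.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from matplotlib.patches import PathPatch
    fig, ax = plt.subplots()
    text_path = TextPath((0.1, 0.4), "Hello", size=0.3,
                         prop=FontProperties(family="sans-serif"))
    ax.add_patch(PathPatch(text_path, facecolor="0.3", lw=0))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.show()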
|
bsd-2-clause
|
elijah513/scikit-learn
|
sklearn/datasets/twenty_newsgroups.py
|
72
|
13586
|
"""Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point in time split between the train and test
sets. The compressed archive is around 14 MB; once uncompressed, the train
set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
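    Examples
    --------
    A minimal sketch of a typical call; the two category names below are
    illustrative picks from the twenty available newsgroups.
    >>> newsgroups_train = fetch_20newsgroups(
    ...     subset='train', categories=['sci.space', 'rec.autos'],
    ...     remove=('headers', 'footers', 'quotes'))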
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
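    Examples
    --------
    A minimal sketch (illustrative):
    >>> bunch = fetch_20newsgroups_vectorized(subset='train')
    >>> bunch.data.shape[0] == bunch.target.shape[0]
    True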
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
|
bsd-3-clause
|
J535D165/recordlinkage
|
recordlinkage/index.py
|
1
|
16422
|
import warnings
import numpy
import pandas
from recordlinkage import rl_logging as logging
from recordlinkage.algorithms.indexing import (
random_pairs_with_replacement,
random_pairs_without_replacement_low_memory,
random_pairs_without_replacement)
from recordlinkage.base import BaseIndexAlgorithm
from recordlinkage.measures import full_index_size
from recordlinkage.utils import DeprecationHelper, listify
class Full(BaseIndexAlgorithm):
"""Class to generate a 'full' index.
A full index is an index with all possible combinations of record pairs.
    In case of linking, this indexation method generates the cartesian product
    of both DataFrames. In case of deduplicating DataFrame A, this indexation
    method returns the pairs defined by the upper triangular matrix of A x A.
Parameters
----------
**kwargs :
Additional keyword arguments to pass to
:class:`recordlinkage.base.BaseIndexAlgorithm`.
Note
----
    This indexation method can be slow for large DataFrames. The number of
    comparisons scales quadratically.
    Also, not all classifiers work well with large numbers of record pairs
    where most of the pairs are distinct.
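    Examples
    --------
    A minimal sketch; the DataFrame names below are placeholders.
    >>> indexer = recordlinkage.index.Full()
    >>> pairs = indexer.index(census_data_1980, census_data_1990)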
"""
def __init__(self, **kwargs):
super(Full, self).__init__(**kwargs)
logging.warning(
"indexing - performance warning "
"- A full index can result in large number of record pairs.")
def _link_index(self, df_a, df_b):
return pandas.MultiIndex.from_product(
[df_a.index.values, df_b.index.values])
def _dedup_index(self, df_a):
levels = [df_a.index.values, df_a.index.values]
codes = numpy.tril_indices(len(df_a.index), k=-1)
return pandas.MultiIndex(
levels=levels, codes=codes, verify_integrity=False)
class Block(BaseIndexAlgorithm):
"""Make candidate record pairs that agree on one or more variables.
Returns all record pairs that agree on the given variable(s). This
method is known as *blocking*. Blocking is an effective way to make a
subset of the record space (A * B).
Parameters
----------
left_on : label, optional
A column name or a list of column names of dataframe A. These
columns are used to block on.
right_on : label, optional
A column name or a list of column names of dataframe B. These
columns are used to block on. If 'right_on' is None, the `left_on`
value is used. Default None.
**kwargs :
Additional keyword arguments to pass to
:class:`recordlinkage.base.BaseIndexAlgorithm`.
Examples
--------
In the following example, the record pairs are made for two historical
datasets with census data. The datasets are named ``census_data_1980``
and ``census_data_1990``.
>>> indexer = recordlinkage.BlockIndex(on='first_name')
>>> indexer.index(census_data_1980, census_data_1990)
"""
def __init__(self, left_on=None, right_on=None, **kwargs):
on = kwargs.pop('on', None)
super(Block, self).__init__(**kwargs)
# variables to block on
self.left_on = left_on
self.right_on = right_on
if on is not None:
warnings.warn(
"The argument 'on' is deprecated. Use 'left_on=...' and "
"'right_on=None' to simulate the behaviour of 'on'.",
DeprecationWarning,
stacklevel=2)
self.left_on, self.right_on = on, on
def __repr__(self):
class_name = self.__class__.__name__
left_on, right_on = self._get_left_and_right_on()
return "<{} left_on={!r}, right_on={!r}>".format(
class_name, left_on, right_on)
def _get_left_and_right_on(self):
if self.right_on is None:
return (self.left_on, self.left_on)
else:
return (self.left_on, self.right_on)
def _link_index(self, df_a, df_b):
left_on, right_on = self._get_left_and_right_on()
left_on = listify(left_on)
right_on = listify(right_on)
blocking_keys = ["blocking_key_%d" % i for i, v in enumerate(left_on)]
# make a dataset for the data on the left
# 1. make a dataframe
# 2. rename columns
# 3. add index col
        # 4. drop na (last step to preserve index)
data_left = pandas.DataFrame(df_a[left_on], copy=False)
data_left.columns = blocking_keys
data_left['index_x'] = numpy.arange(len(df_a))
data_left.dropna(axis=0, how='any', subset=blocking_keys, inplace=True)
# make a dataset for the data on the right
data_right = pandas.DataFrame(df_b[right_on], copy=False)
data_right.columns = blocking_keys
data_right['index_y'] = numpy.arange(len(df_b))
data_right.dropna(
axis=0, how='any', subset=blocking_keys, inplace=True)
# merge the dataframes
pairs_df = data_left.merge(data_right, how='inner', on=blocking_keys)
return pandas.MultiIndex(
levels=[df_a.index.values, df_b.index.values],
codes=[pairs_df['index_x'].values, pairs_df['index_y'].values],
verify_integrity=False)
class SortedNeighbourhood(BaseIndexAlgorithm):
"""Make candidate record pairs with the SortedNeighbourhood algorithm.
This algorithm returns record pairs that agree on the sorting key, but
also records pairs in their neighbourhood. A large window size results
in more record pairs. A window size of 1 returns the blocking index.
    The Sorted Neighbourhood Index method is a great method when there is a
    relatively large amount of spelling mistakes. Blocking will fail in
    that situation because it excludes too many records on minor spelling
    mistakes.
Parameters
----------
left_on : label, optional
The column name of the sorting key of the first/left dataframe.
right_on : label, optional
The column name of the sorting key of the second/right dataframe.
window: int, optional
The width of the window, default is 3
sorting_key_values: array, optional
A list of sorting key values (optional).
block_on: label
Additional columns to apply standard blocking on.
block_left_on: label
Additional columns in the left dataframe to apply standard
blocking on.
block_right_on: label
Additional columns in the right dataframe to apply standard
blocking on.
**kwargs :
Additional keyword arguments to pass to
:class:`recordlinkage.base.BaseIndexAlgorithm`.
Examples
--------
In the following example, the record pairs are made for two historical
datasets with census data. The datasets are named ``census_data_1980``
and ``census_data_1990``.
>>> indexer = recordlinkage.SortedNeighbourhoodIndex(
'first_name', window=9
)
>>> indexer.index(census_data_1980, census_data_1990)
When the sorting key has different names in both dataframes:
>>> indexer = recordlinkage.SortedNeighbourhoodIndex(
left_on='first_name', right_on='given_name', window=9
)
>>> indexer.index(census_data_1980, census_data_1990)
"""
def __init__(self,
left_on=None,
right_on=None,
window=3,
sorting_key_values=None,
block_on=[],
block_left_on=[],
block_right_on=[],
**kwargs):
on = kwargs.pop('on', None)
super(SortedNeighbourhood, self).__init__(**kwargs)
# variables to block on
self.left_on = left_on
self.right_on = right_on
self.window = window
self.sorting_key_values = sorting_key_values
self.block_on = block_on
self.block_left_on = block_left_on
self.block_right_on = block_right_on
if on is not None:
warnings.warn(
"The argument 'on' is deprecated. Use 'left_on=...' and "
"'right_on=None' to simulate the behaviour of 'on'.",
DeprecationWarning,
stacklevel=2)
self.left_on, self.right_on = on, on
def __repr__(self):
class_name = self.__class__.__name__
left_on, right_on = self._get_left_and_right_on()
return "<{} left_on={!r}, right_on={!r}>".format(
class_name, left_on, right_on)
def _get_left_and_right_on(self):
if self.right_on is None:
return (self.left_on, self.left_on)
else:
return (self.left_on, self.right_on)
def _get_sorting_key_values(self, array1, array2):
"""return the sorting key values as a series"""
concat_arrays = numpy.concatenate([array1, array2])
unique_values = numpy.unique(concat_arrays)
return numpy.sort(unique_values)
def _link_index(self, df_a, df_b):
left_on, right_on = self._get_left_and_right_on()
left_on = listify(left_on)
right_on = listify(right_on)
window = self.window
# Check if window is an odd number
if not isinstance(window, int) or (window < 0) or not bool(window % 2):
raise ValueError('window is not a positive and odd integer')
# # sorting key is single column
# if isinstance(self.on, (tuple, list, dict)):
# raise ValueError(
# "sorting key is not a label")
# make blocking keys correct
block_left_on = listify(self.block_left_on)
block_right_on = listify(self.block_right_on)
if self.block_on:
block_left_on = listify(self.block_on)
block_right_on = listify(self.block_on)
blocking_keys = ['sorting_key'] + \
["blocking_key_%d" % i for i, v in enumerate(block_left_on)]
# make a dataset for the data on the left
# 1. make a dataframe
# 2. rename columns
# 3. add index col
        # 4. drop na (last step to preserve index)
data_left = pandas.DataFrame(
df_a[listify(left_on) + block_left_on], copy=False)
data_left.columns = blocking_keys
data_left['index_x'] = numpy.arange(len(df_a))
data_left.dropna(axis=0, how='any', subset=blocking_keys, inplace=True)
data_right = pandas.DataFrame(
df_b[listify(right_on) + block_right_on], copy=False)
data_right.columns = blocking_keys
data_right['index_y'] = numpy.arange(len(df_b))
data_right.dropna(
axis=0, how='any', subset=blocking_keys, inplace=True)
# sorting_key_values is the terminology in Data Matching [Christen,
# 2012]
if self.sorting_key_values is None:
self.sorting_key_values = self._get_sorting_key_values(
data_left['sorting_key'].values,
data_right['sorting_key'].values)
sorting_key_factors = pandas.Series(
numpy.arange(len(self.sorting_key_values)),
index=self.sorting_key_values)
data_left['sorting_key'] = data_left['sorting_key'].map(
sorting_key_factors)
data_right['sorting_key'] = data_right['sorting_key'].map(
sorting_key_factors)
# Internal window size
_window = int((window - 1) / 2)
def merge_lagged(x, y, w):
"""Merge two dataframes with a lag on in the sorting key."""
y = y.copy()
y['sorting_key'] = y['sorting_key'] + w
return x.merge(y, how='inner')
pairs_concat = [
merge_lagged(data_left, data_right, w)
for w in range(-_window, _window + 1)
]
pairs_df = pandas.concat(pairs_concat, axis=0)
return pandas.MultiIndex(
levels=[df_a.index.values, df_b.index.values],
codes=[pairs_df['index_x'].values, pairs_df['index_y'].values],
verify_integrity=False)
class Random(BaseIndexAlgorithm):
"""Class to generate random pairs of records.
This class returns random pairs of records with or without
replacement. Use the random_state parameter to seed the algorithm and
    reproduce results. This way of making record pairs is useful for the
    training of unsupervised learning models for record linkage.
Parameters
----------
n : int
The number of record pairs to return. In case replace=False, the
integer n should be bounded by 0 < n <= n_max where n_max is the
maximum number of pairs possible.
replace : bool, optional
Whether the sample of record pairs is with or without replacement.
Default: True
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or
numpy.RandomState object.
**kwargs :
Additional keyword arguments to pass to
:class:`recordlinkage.base.BaseIndexAlgorithm`.
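    Examples
    --------
    A minimal sketch; the DataFrame names below are placeholders.
    >>> indexer = recordlinkage.index.Random(n=1000, random_state=42)
    >>> pairs = indexer.index(census_data_1980, census_data_1990)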
"""
def __init__(self, n, replace=True, random_state=None, **kwargs):
super(Random, self).__init__(**kwargs)
self.n = n
self.replace = replace
self.random_state = random_state
def __repr__(self):
class_name = self.__class__.__name__
return "<{} n={!r}, replace={!r}>".format(class_name, self.n,
self.replace)
def _link_index(self, df_a, df_b):
shape = (len(df_a), len(df_b))
n_max = full_index_size(shape)
if not isinstance(self.n, int):
raise ValueError('n must be an integer')
# with replacement
if self.replace:
if n_max == 0:
raise ValueError("one of the dataframes is empty")
pairs = random_pairs_with_replacement(self.n, shape,
self.random_state)
# without replacement
else:
if self.n <= 0 or self.n > n_max:
raise ValueError(
"n must be a integer satisfying 0<n<=%s" % n_max)
# the fraction of pairs in the sample
frac = self.n / n_max
            # small dataframes (or large sampling fraction)
if n_max < 1e6 or frac > 0.5:
pairs = random_pairs_without_replacement(
self.n, shape, self.random_state)
            # large dataframes with a small sampling fraction
else:
pairs = random_pairs_without_replacement_low_memory(
self.n, shape, self.random_state)
levels = [df_a.index.values, df_b.index.values]
codes = pairs
return pandas.MultiIndex(
levels=levels, codes=codes, verify_integrity=False)
def _dedup_index(self, df_a):
shape = (len(df_a), )
# with replacement
if self.replace:
pairs = random_pairs_with_replacement(self.n, shape,
self.random_state)
# without replacement
else:
n_max = full_index_size(shape)
if not isinstance(self.n, int) or self.n <= 0 or self.n > n_max:
raise ValueError(
"n must be a integer satisfying 0<n<=%s" % n_max)
            # small dataframes
if n_max < 1e6:
pairs = random_pairs_without_replacement(
self.n, shape, self.random_state)
            # large dataframes
else:
pairs = random_pairs_without_replacement_low_memory(
self.n, shape, self.random_state)
levels = [df_a.index.values, df_a.index.values]
labels = pairs
return pandas.MultiIndex(
levels=levels, codes=labels, verify_integrity=False)
FullIndex = DeprecationHelper(
Full, "class recordlinkage.FullIndex is renamed and moved, "
"use recordlinkage.index.Full")
BlockIndex = DeprecationHelper(
Block, "class recordlinkage.BlockIndex is renamed and moved, "
"use recordlinkage.index.Block")
SortedNeighbourhoodIndex = DeprecationHelper(
SortedNeighbourhood, "class recordlinkage.SortedNeighbourhoodIndex "
"is renamed and moved, use recordlinkage.index.SortedNeighbourhood")
RandomIndex = DeprecationHelper(
Random, "class recordlinkage.RandomIndex is renamed and moved, "
"use recordlinkage.index.Random")
|
bsd-3-clause
|
conferency/find-my-reviewers
|
utilities/tokeniser.py
|
1
|
3837
|
import pandas as pd
import sqlite3
import nltk
from nltk.corpus import stopwords
import glob
from io import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
import json
from textblob import TextBlob
from multiprocessing import Pool
import sys
'''
Usage:
python tokenizer.py fulltext
python tokenizer.py fulltext noun_phrases
python tokenizer.py abstract
python tokenizer.py abstract noun_phrases
'''
con = sqlite3.connect("data.sqlite") # specify your database here
db_documents = pd.read_sql_query("SELECT * from documents", con)
db_authors = pd.read_sql_query("SELECT * from authors", con)
data = db_documents.set_index("submission_path")
args = sys.argv
tokenised = {}
split = 0
mode = "abstract" # default mode
np = False
single_file_max_documents = 10  # the maximum number of documents per file; useful when memory is limited.
def save_json(target_object, filename):
with open(filename, 'w') as fp:
json.dump(target_object, fp)
print("INFO: Saved", filename)
def save(number_suffix=""):
global np
if number_suffix:
number_suffix = "_" + str(number_suffix)
else:
number_suffix = ""
    if np:
        # noun-phrase runs get the "_np_" marker in the output filename
        save_json(tokenised, mode + "_np_tokenised" + number_suffix + ".json")
    else:
        save_json(tokenised, mode + "_tokenised" + number_suffix + ".json")
def log(result):
global split
global tokenised
tokenised[result[0]] = result[1]
if len(tokenised) == single_file_max_documents:
print("INFO: Exceeded single_file_max_documents:", single_file_max_documents)
save(split)
print("INFO: Saved to split", split)
split += 1
tokenised = {}
def pdf2string(fname, pages=None):
if not pages:
pagenums = set()
else:
pagenums = set(pages)
output = StringIO(newline=None)
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
infile = open(fname, 'rb')
for page in PDFPage.get_pages(infile, pagenums):
try:
interpreter.process_page(page)
except:
print("ERROR: Error while processing a page in", fname)
pass
infile.close()
converter.close()
text = output.getvalue()
    output.close()
return text
def textblob_tokenise(path, prefix, suffix, mode, noun_phrase=False):
filepath = prefix + path + suffix
# filepath = "F:/FMR/aisel.aisnet.org/" + path + "/fulltext.pdf"
print("INFO: Processing", path)
text = data.loc[path]["title"] + " " + data.loc[path]["abstract"]
def clean(text):
return text.replace("<p>", " ").replace("</p>", " ").replace("- ", "").replace("-", "")
if mode == "fulltext":
try:
text += " " + pdf2string(filepath)
except:
pass
if noun_phrase:
tokenised = list(TextBlob(clean(text).encode("ascii", "ignore").decode('ascii')).noun_phrases)
else:
tokenised = TextBlob(clean(text).encode("ascii", "ignore").decode('ascii')).words
print("INFO:", path, "done.")
return path, tokenised
if __name__ == "__main__":
p = Pool()
print(args)
try:
mode = args[1]
except IndexError:
print("WARNING: Unspecificed argument. It could be 'abstract' or 'fulltext'. Using '", mode, "'.")
try:
if args[2] == "noun_phrases":
print("INFO: Using noun phrase extraction.")
np = True
except IndexError:
pass
for i in data.index:
p.apply_async(textblob_tokenise, args = (i, "F:/FMR/aisel.aisnet.org/", "/fulltext.pdf", mode, np), callback = log)
p.close()
p.join()
save(split)
|
mit
|
neherlab/ffpopsim
|
tests/python_hiv.py
|
2
|
1305
|
# vim: fdm=indent
'''
author: Fabio Zanini
date: 25/04/12
content: Test script for the python bindings
'''
# Import module
import sys
sys.path.insert(0, '../pkg/python')
import numpy as np
import matplotlib.pyplot as plt
import FFPopSim as h
# Construct class
pop = h.hivpopulation(1000)
# Test I/O fitness landscapes
pop.set_replication_landscape(lethal_fraction=0.05,
number_valleys=0)
pop.read_replication_coefficients('hiv_model.dat')
rep = pop.get_replication_additive()
rep[np.random.random(10000) > 0.5] = -0.1
pop.set_replication_additive(rep)
# Show the additive part of the fitness landscape
print(pop.get_trait_additive())
# Test population initialization
pop.set_allele_frequencies([0.3] * h.HIVGENOME, 1000)
# Test allele frequency readout
print(np.max(pop.get_allele_frequency(4)))
# Test evolution
from time import time as ti
t0 = ti()
pop.evolve(30)
t1 = ti()
print('Time for evolving HIV for 30 generations: {:1.1f} s'.format(t1-t0))
# Write genotypes
pop.write_genotypes('test.txt', 100)
pop.write_genotypes_compressed('test.npz', 100)
# Plot histograms
plt.ion()
pop.plot_fitness_histogram()
pop.plot_divergence_histogram(color='r')
pop.plot_diversity_histogram(color='g')
# Test treatment changes
pop.treatment = 0.4
print(pop.treatment)
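# --- Added sketch (not in the original test) ---
# A small helper illustrating how to record an allele-frequency trajectory,
# using only calls already exercised above (pop.evolve, pop.get_allele_frequency).
def track_allele_frequency(population, locus, generations, step=10):
    trajectory = []
    for generation in range(step, generations + 1, step):
        population.evolve(step)
        trajectory.append((generation, population.get_allele_frequency(locus)))
    return trajectory
# e.g. traj = track_allele_frequency(pop, 4, 100)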
|
gpl-3.0
|
matsushitayuki/space-modeling
|
denoise_2.py
|
1
|
1144
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
from OD_setup import *
from denoise_setup import *
D = np.load('D_cifar_1.npy')
stride = 16
M,K = D.shape
n = int(M**0.5)
lamda = 1
sigma = 20
mu = 30/sigma
e = M*(1.15*sigma)**2
X_list = []
X = cv2.imread('barbara.jpg', 0)
X = np.array(X,float)
N = int(X.shape[0])
X_0 = np.array(X)
X_list.append(X_0)
np.random.seed(0)
X += np.random.normal(0,sigma,(N,N))
X_noisy = np.array(X)
X_list.append(X_noisy)
RTR = R_T_R(N,n)
RTR += mu*np.ones((N**2,1))
inv = 1/RTR
for _ in range(5):  # outer denoising iterations; the patch loops below reuse i and j as indices
R_T_Dalpha = np.zeros((N,N))
for i in np.arange(0,(N-n+1),stride):
for j in np.arange(0,(N-n+1),stride):
print(i,j)
RX = R(X,i,j,n)
RX = RX.reshape(n**2,1)
alpha = alpha_line(lamda,RX,D,50,e)
Dalpha = (D@alpha).reshape(n,n)
R_T_Dalpha += R_T(Dalpha,i,j,N)
X = X.reshape(N**2,1)
R_T_Dalpha = R_T_Dalpha.reshape(N**2,1)
X = inv * (mu*X + R_T_Dalpha)
X = X.reshape(N,N)
X_list.append(X)
for i in range(len(X_list)):
plt.subplot(1,3,i+1)
plt.imshow(X_list[i])
plt.gray()
plt.show()
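# --- Added sketch (not part of the original script) ---
# One way to quantify the result: peak signal-to-noise ratio between the clean
# image X_list[0] and the denoised image X_list[2], assuming an 8-bit pixel range.
def psnr(reference, estimate, peak=255.0):
    mse = np.mean((reference - estimate) ** 2)
    return 10 * np.log10(peak ** 2 / mse)
# e.g. print(psnr(X_list[0], X_list[2]), "dB")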
|
gpl-3.0
|
phobson/pybmpdb
|
pybmpdb/tests/test_bmpdb.py
|
2
|
7561
|
import sys
import os
import zipfile
from io import StringIO
from pkg_resources import resource_filename
from urllib import request
from pathlib import Path
from unittest.mock import patch
import pytest
import numpy.testing as nptest
import pandas.util.testing as pdtest
import numpy
import pandas
from pybmpdb import bmpdb
import wqio
def get_data_file(filename):
return resource_filename("pybmpdb.tests._data", filename)
@pytest.fixture
def df_for_quals():
df = pandas.DataFrame(
[
{"res": 1, "DL": 2, "qual": "U"},
{"res": 1, "DL": 2, "qual": "UK"},
{"res": 1, "DL": 2, "qual": "UA"},
{"res": 1, "DL": 2, "qual": "UC"},
{"res": 1, "DL": 2, "qual": "K"},
{"res": 5.0, "DL": 15.0, "qual": "UJ"},
{"res": 5.0, "DL": 10.0, "qual": "UJ"},
{"res": 10.0, "DL": 5.0, "qual": "UJ"},
{"res": 10.0, "DL": 10.0, "qual": "UJ"},
{"res": 5.0, "DL": 5.0, "qual": "junk"},
]
)
return df
def test__handle_ND_factors(df_for_quals):
expected = numpy.array([2, 2, 2, 2, 2, 3, 2, 1, 1, 1])
result = bmpdb._handle_ND_factors(df_for_quals)
nptest.assert_array_equal(result, expected)
def test__handle_ND_qualifiers(df_for_quals):
result = bmpdb._handle_ND_qualifiers(df_for_quals)
expected = numpy.array(["ND", "ND", "ND", "ND", "ND", "ND", "ND", "=", "ND", "="])
nptest.assert_array_equal(result, expected)
def test__process_screening():
df = pandas.DataFrame({"screen": ["Yes", "INC", "No", "eXC", "junk"]})
expected = numpy.array(["yes", "yes", "no", "no", "invalid"])
result = bmpdb._process_screening(df, "screen")
nptest.assert_array_equal(result, expected)
def test__process_sampletype():
df = pandas.DataFrame(
{"sampletype": ["SRL GraB asdf", "SeL cOMPositE df", "jeL LSDR as"]}
)
expected = numpy.array(["grab", "composite", "unknown"])
result = bmpdb._process_sampletype(df, "sampletype")
nptest.assert_array_equal(result, expected)
def test__check_levelnames():
bmpdb._check_levelnames(["epazone", "category"])
with pytest.raises(ValueError):
bmpdb._check_levelnames(["site", "junk"])
@patch.object(pandas, "read_csv")
def test_load_data(read_csv):
bmpdb.load_data("bmp.csv")
read_csv.assert_called_once_with(
Path("bmp.csv"), parse_dates=["sampledate"], encoding="utf-8"
)
@pytest.mark.skipif(True, reason="test not ready")
def test_clean_raw_data():
pass
def test_transform_parameters():
index_cols = ["storm", "param", "units"]
df = pandas.DataFrame(
{
"storm": [1, 1, 2, 2, 3, 3],
"param": list("ABABAB"),
"units": ["mg/L"] * 6,
"res": [1, 2, 3, 4, 5, 6],
"qual": ["<", "="] * 3,
}
).set_index(index_cols)
expected = pandas.DataFrame(
{
"storm": [1, 1, 2, 2, 3, 3, 1, 2, 3],
"param": list("ABABABCCC"),
"units": (["mg/L"] * 6) + (["ug/L"] * 3),
"res": [1, 2, 3, 4, 5, 6, 3000, 7000, 11000],
"qual": (["<", "="] * 3) + (["="] * 3),
}
).set_index(index_cols)
old_params = ["A", "B"]
new_param = "C"
result = bmpdb.transform_parameters(
df,
old_params,
new_param,
"ug/L",
lambda x: 1000 * x["res"].sum(axis=1),
lambda x: x[("qual", "B")],
paramlevel="param",
)
pdtest.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
("fxn", "args", "index_cols", "infilename", "outfilename"),
[
(
bmpdb._pick_best_station,
[],
["site", "bmp", "storm", "parameter", "station"],
"test_pick_station_input.csv",
"test_pick_station_output.csv",
),
(
bmpdb._pick_best_sampletype,
[],
["site", "bmp", "storm", "parameter", "station", "sampletype"],
"test_pick_sampletype_input.csv",
"test_pick_sampletype_output.csv",
),
(
bmpdb._filter_by_storm_count,
[6],
["category", "site", "bmp", "storm", "parameter", "station"],
"test_filter_bmp-storm_counts_input.csv",
"test_filter_storm_counts_output.csv",
),
(
bmpdb._filter_by_BMP_count,
[4],
["category", "site", "bmp", "parameter", "station"],
"test_filter_bmp-storm_counts_input.csv",
"test_filter_bmp_counts_output.csv",
),
],
)
def test_summary_filter_functions(fxn, args, index_cols, infilename, outfilename):
input_df = pandas.read_csv(get_data_file(infilename), index_col=index_cols)
expected_df = pandas.read_csv(
get_data_file(outfilename), index_col=index_cols
).sort_index()
test_df = fxn(input_df, *args).sort_index()
pdtest.assert_frame_equal(expected_df.reset_index(), test_df.reset_index())
@pytest.mark.parametrize("doit", [True, False])
@pytest.mark.parametrize(
("fxn", "index_cols", "infilename", "outfilename"),
[
(
bmpdb._maybe_filter_onesided_BMPs,
["category", "site", "bmp", "storm", "parameter", "station"],
"test_filter_onesidedbmps_input.csv",
"test_filter_onesidedbmps_output.csv",
),
(
bmpdb._maybe_combine_nox,
["bmp", "category", "storm", "units", "parameter"],
"test_WBRP_NOx_input.csv",
"test_NOx_output.csv",
),
(
bmpdb._maybe_combine_WB_RP,
["bmp", "category", "storm", "units", "parameter"],
"test_WBRP_NOx_input.csv",
"test_WBRP_output.csv",
),
(
bmpdb._maybe_fix_PFCs,
["bmp", "category", "bmptype", "storm", "parameter"],
"test_PFCs_input.csv",
"test_PFCs_output.csv",
),
(
bmpdb._maybe_remove_grabs,
["bmp", "category", "sampletype", "storm"],
"test_grabsample_input.csv",
"test_grabsample_output.csv",
),
],
)
def test__maybe_filter_functions(fxn, doit, index_cols, infilename, outfilename):
input_df = pandas.read_csv(get_data_file(infilename), index_col=index_cols)
result = fxn(input_df, doit).sort_index()
if doit:
expected = pandas.read_csv(
get_data_file(outfilename), index_col=index_cols
).sort_index()
else:
expected = input_df.copy().sort_index()
pdtest.assert_frame_equal(result, expected)
def test__pick_non_null():
df = pandas.DataFrame(
{
("res", "this"): [1.0, numpy.nan, 2.0, numpy.nan],
("res", "that"): [numpy.nan, numpy.nan, 9.0, 3.0],
}
)
expected = numpy.array([1.0, numpy.nan, 2.0, 3.0])
result = bmpdb._pick_non_null(df, "res", "this", "that")
nptest.assert_array_equal(result, expected)
def test_paired_qual():
df = pandas.DataFrame(
{"in_qual": ["=", "=", "ND", "ND"], "out_qual": ["=", "ND", "=", "ND"]}
)
expected = ["Pair", "Effluent ND", "Influent ND", "Both ND"]
result = bmpdb.paired_qual(df, "in_qual", "out_qual")
nptest.assert_array_equal(result, expected)
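# --- Added note (not part of the original test module) ---
# For an interactive check, the expectation asserted in test_paired_qual above
# translates directly to:
#
#   >>> bmpdb.paired_qual(df, "in_qual", "out_qual")
#   ['Pair', 'Effluent ND', 'Influent ND', 'Both ND']   # in some array-like form
#
# where df is the two-column frame constructed in that test.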
|
bsd-3-clause
|
giorgiop/scikit-learn
|
sklearn/covariance/tests/test_robust_covariance.py
|
77
|
3825
|
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'fast_mcd expects at least 2 samples',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'MinCovDet expects at least 2 samples',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
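# --- Added sketch (not part of the original test file) ---
# Minimal standalone usage of EllipticEnvelope for outlier flagging; only the
# fit/predict API already exercised above is assumed. The leading underscore
# keeps pytest from collecting it as a test.
def _elliptic_envelope_demo(random_state=0):
    rnd = np.random.RandomState(random_state)
    X_demo = rnd.randn(200, 2)
    clf = EllipticEnvelope(contamination=0.05).fit(X_demo)
    labels = clf.predict(X_demo)  # +1 for inliers, -1 for flagged outliers
    return labels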
|
bsd-3-clause
|
mtdx/ml-algorithms
|
regression/linear-regression-eg.py
|
1
|
1818
|
import pandas as pd
import quandl
import math
import numpy as np
import datetime
import matplotlib.pyplot as plt
from matplotlib import style
import pickle
style.use('ggplot')
from sklearn import preprocessing, model_selection, svm
from sklearn.linear_model import LinearRegression
df = quandl.get('WIKI/GOOGL')
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Close'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
#
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
forecast_c = 'Adj. Close'
df.fillna(-99999, inplace=True)
forecast_o = int(math.ceil(0.1 * len(df)))
# set label
df['label'] = df[forecast_c].shift(-forecast_o)
X = np.array(df.drop(['label'], axis=1))
X = preprocessing.scale(X)
X_lately = X[-forecast_o:]
X = X[:-forecast_o]
df.dropna(inplace=True)
y = np.array(df['label'])
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
clf = LinearRegression(n_jobs=-1)
clf.fit(X_train, y_train)
# with open('pickle/linearregression.pickle', 'wb') as f:
# pickle.dump(clf, f)
# pickle_in = open('pickle/linearregression.pickle', 'rb')
# clf = pickle.load(pickle_in)
accuracy = clf.score(X_test, y_test)
forecast_set = clf.predict(X_lately)
# print(forecast_set, accuracy, forecast_o)
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400
next_unix = last_unix + one_day
for i in forecast_set:
next_date = datetime.datetime.fromtimestamp(next_unix)
    next_unix += one_day
df.loc[next_date] = [np.nan for _ in range(len(df.columns) - 1)] + [i]
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
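# --- Added sketch (not part of the original tutorial script) ---
# A simple way to report held-out error alongside clf.score(); mean_absolute_error
# ships with scikit-learn, which is already a dependency here.
def report_mae(model, features, targets):
    from sklearn.metrics import mean_absolute_error
    return mean_absolute_error(targets, model.predict(features))
# e.g. print("MAE:", report_mae(clf, X_test, y_test))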
|
mit
|
elaeon/ML
|
src/dama/clf/extended/w_sklearn.py
|
1
|
6212
|
from sklearn.calibration import CalibratedClassifierCV
from dama.clf.wrappers import SKL, SKLP
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression as LReg
from sklearn.linear_model import SGDClassifier as SGDClassif
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
class SVC(SKL):
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None):
model = CalibratedClassifierCV(svm.LinearSVC(**model_params), method="sigmoid")
model_clf = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
cal_model = CalibratedClassifierCV(model_clf, method="sigmoid", cv="prefit")
cal_model.fit(self.ds[self.data_groups["data_validation_group"]].to_ndarray(),
self.ds[self.data_groups["target_validation_group"]].to_ndarray())
return self.ml_model(cal_model)
class RandomForest(SKLP):
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None):
if model_params is None:
model_params = dict(n_estimators=25, min_samples_split=2)
model = CalibratedClassifierCV(RandomForestClassifier(**model_params), method="sigmoid")
model_clf = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
cal_model = CalibratedClassifierCV(model_clf, method="sigmoid", cv="prefit")
cal_model.fit(self.ds[self.data_groups["data_validation_group"]].to_ndarray(),
self.ds[self.data_groups["target_validation_group"]].to_ndarray())
return self.ml_model(cal_model)
class ExtraTrees(SKLP):
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None):
model = CalibratedClassifierCV(ExtraTreesClassifier(**model_params), method="sigmoid")
model_clf = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
cal_model = CalibratedClassifierCV(model_clf, method="sigmoid", cv="prefit")
cal_model.fit(self.ds[self.data_groups["data_validation_group"]].to_ndarray(),
self.ds[self.data_groups["target_validation_group"]].to_ndarray())
return self.ml_model(cal_model)
class LogisticRegression(SKLP):
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None):
model = CalibratedClassifierCV(LReg(**model_params), method="sigmoid")
model_clf = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
cal_model = CalibratedClassifierCV(model_clf, method="sigmoid", cv="prefit")
cal_model.fit(self.ds[self.data_groups["data_validation_group"]].to_ndarray(),
self.ds[self.data_groups["target_validation_group"]].to_ndarray())
return self.ml_model(cal_model)
class SGDClassifier(SKLP):
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None):
model = CalibratedClassifierCV(SGDClassif(**model_params), method="sigmoid")
model_clf = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
cal_model = CalibratedClassifierCV(model_clf, method="sigmoid", cv="prefit")
cal_model.fit(self.ds[self.data_groups["data_validation_group"]].to_ndarray(),
self.ds[self.data_groups["target_validation_group"]].to_ndarray())
return self.ml_model(cal_model)
class AdaBoost(SKLP):
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None):
model = CalibratedClassifierCV(AdaBoostClassifier(**model_params), method="sigmoid")
model_clf = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
cal_model = CalibratedClassifierCV(model_clf, method="sigmoid", cv="prefit")
cal_model.fit(self.ds[self.data_groups["data_validation_group"]].to_ndarray(),
self.ds[self.data_groups["target_validation_group"]].to_ndarray())
return self.ml_model(cal_model)
class GradientBoost(SKLP):
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None):
model = CalibratedClassifierCV(GradientBoostingClassifier(**model_params), method="sigmoid")
model_clf = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
cal_model = CalibratedClassifierCV(model_clf, method="sigmoid", cv="prefit")
cal_model.fit(self.ds[self.data_groups["data_validation_group"]].to_ndarray(),
self.ds[self.data_groups["target_validation_group"]].to_ndarray())
return self.ml_model(cal_model)
class KNN(SKLP):
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None):
model = CalibratedClassifierCV(KNeighborsClassifier(**model_params), method="sigmoid")
model_clf = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
cal_model = CalibratedClassifierCV(model_clf, method="sigmoid", cv="prefit")
cal_model.fit(self.ds[self.data_groups["data_validation_group"]].to_ndarray(),
self.ds[self.data_groups["target_validation_group"]].to_ndarray())
return self.ml_model(cal_model)
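# --- Added sketch (not part of dama) ---
# The two-stage pattern shared by every class above, written against plain
# arrays instead of dama datasets; only standard scikit-learn API is used
# (CalibratedClassifierCV, including cv="prefit" for the second stage).
def two_stage_calibration(base_estimator, x_train, y_train, x_val, y_val):
    first = CalibratedClassifierCV(base_estimator, method="sigmoid")
    first.fit(x_train, y_train)
    second = CalibratedClassifierCV(first, method="sigmoid", cv="prefit")
    second.fit(x_val, y_val)
    return second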
|
apache-2.0
|