repo_name | path | copies | size | content | license
---|---|---|---|---|---|
arizona-phonological-imaging-lab/Autotrace
|
matlab-version/LinguaView.py
|
3
|
5404
|
#!/usr/bin/env python
import sys
import os
import neutralContour as nc
import LabelWindow as lw
import AnalysisWindow as aw
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import gobject
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
class LinguaViewer:
"""This is the class for the main window of LinguaViewer"""
def __init__(self, datafiles=[]):
#self.static_dir = '/Users/jeff/autotracer/trunk/LinguaViewer/'
self.gladefile = "LinguaViewer.glade"
self.wTree = gtk.glade.XML(self.gladefile, "mainwindow")
self.win = self.wTree.get_widget("mainwindow")
self.win.set_title("LinguaView")
self.mainVBox = self.wTree.get_widget("vbox2")
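# Map the handler names declared in the Glade file to bound methods, then
# let libglade connect every signal in one call below.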
dic = { "on_mainwindow_destroy": gtk.main_quit,
"on_quit_activate" : gtk.main_quit,
"on_open_activate" : self.onOpen,
"on_tbOpen_clicked" : self.onOpen,
"on_tbView_clicked": self.onView,
"on_tbLabel1_clicked": self.onLabel,
"on_tbRemove_clicked" : self.onRemove,
"on_tbAnalyze_clicked" : self.onAnalyze,
"on_showlinguagram_toggled": self.showlinguagram,
"on_showneutral_toggled": self.showneutral,
"on_showwave_toggled": self.showwave,
"on_showspec_toggled": self.showspec}
self.wTree.signal_autoconnect(dic)
self.SHOW_LING = False
self.SHOW_NEUT = False
self.SHOW_WAVE = False
self.SHOW_SPEC = False
self.linguaToggle = self.wTree.get_widget("showlinguagram")
self.neutralToggle = self.wTree.get_widget("showneutral")
self.neutralToggle.set_active(True)
self.waveToggle = self.wTree.get_widget("showwave")
self.waveToggle.set_active(True)
self.specToggle = self.wTree.get_widget("showspec")
self.specToggle.set_active(True)
self.TreeView = self.wTree.get_widget("treeview1")
column = gtk.TreeViewColumn("Data Files", gtk.CellRendererText(), text=0)
column.set_resizable(True)
column.set_sort_column_id(0)
self.TreeView.append_column(column)
self.DataList = gtk.ListStore(str)
self.TreeView.set_model(self.DataList)
self.datafiles = datafiles
if len(self.datafiles) > 0:
for i in self.datafiles:
self.DataList.append([i])
self.labelInd = 0
def onOpen(self, event):
fc = gtk.FileChooserDialog(title='Open Data Files', parent=None,
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
fc.set_select_multiple(True)
ffilter = gtk.FileFilter()
ffilter.set_name('.csv Files')
ffilter.add_pattern('*.csv')
fc.add_filter(ffilter)
response = fc.run()
if response == gtk.RESPONSE_OK:
self.datafiles = fc.get_filenames()
g_directory = fc.get_current_folder()
for i in self.datafiles:
self.DataList.append([i])
fc.destroy()
def onRemove(self, event):
selection = self.TreeView.get_selection()
model, select_iter = selection.get_selected()
if (select_iter):
self.DataList.remove(select_iter)
def onView(self, event):
selection = self.TreeView.get_selection()
model, select_iter = selection.get_selected()
if (select_iter):
fname = self.DataList.get_value(select_iter, 0)
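# the neutral tongue contour is expected in a file named neutral.csv in the
# same directory as the selected data file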
n = fname.split('/')
neutralfname = '/'.join(n[:-1]) + '/neutral.csv'
nc.NeutralTongue(fname, neutralfname, self.SHOW_LING, self.SHOW_NEUT, self.SHOW_WAVE, self.SHOW_SPEC)
def onLabel(self, event):
selection = self.TreeView.get_selection()
model, select_iter = selection.get_selected()
if (select_iter):
fname = self.DataList.get_value(select_iter, 0)
n = fname.split('/')
neutralfname = '/'.join(n[:-1]) + '/neutral.csv'
lw.LabelWindow([fname], self.SHOW_LING, self.SHOW_NEUT, self.SHOW_WAVE, self.SHOW_SPEC)
def onAnalyze(self, event):
aw.AnalysisWindow(self.datafiles)
def showlinguagram(self, event):
if self.SHOW_LING == False:
self.SHOW_LING = True
else:
self.SHOW_LING = False
def showneutral(self, event):
if self.SHOW_NEUT == False:
self.SHOW_NEUT = True
else:
self.SHOW_NEUT = False
def showwave(self, event):
if self.SHOW_WAVE == False:
self.SHOW_WAVE = True
else:
self.SHOW_WAVE = False
def showspec(self, event):
if self.SHOW_SPEC == False:
self.SHOW_SPEC = True
else:
self.SHOW_SPEC = False
if __name__ == "__main__":
LinguaViewer()
gtk.main()
|
mit
|
siutanwong/scikit-learn
|
examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the coefficient vector, displayed
as a function of the regularization parameter.
At the end of the path, as alpha tends toward zero and the solution tends
towards ordinary least squares, the coefficients exhibit large oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
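# Hilbert matrices are notoriously ill-conditioned, so the unregularized
# least-squares coefficients are extremely sensitive; this is what produces
# the large oscillations seen at small alpha.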
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
joshbohde/scikit-learn
|
examples/applications/face_recognition.py
|
2
|
5432
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Gerhard_Schroeder 0.91 0.75 0.82 28
Donald_Rumsfeld 0.84 0.82 0.83 33
Tony_Blair 0.65 0.82 0.73 34
Colin_Powell 0.78 0.88 0.83 58
George_W_Bush 0.93 0.86 0.90 129
avg / total 0.86 0.84 0.85 282
.. image:: /images/plot_face_recognition_1.png
:scale: 50%
.. image:: /images/plot_face_recognition_2.png
:scale: 50%
"""
print __doc__
from time import time
import logging
import pylab as pl
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
################################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# reshape the data using the traditional (n_samples, n_features) shape
faces = lfw_people.data
n_samples, h, w = faces.shape
X = faces.reshape((n_samples, h * w))
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
################################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
train, test = iter(StratifiedKFold(y, k=4)).next()
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
################################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print "Extracting the top %d eigenfaces from %d faces" % (
n_components, X_train.shape[0])
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print "done in %0.3fs" % (time() - t0)
eigenfaces = pca.components_.reshape((n_components, h, w))
print "Projecting the input data on the eigenfaces orthonormal basis"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print "done in %0.3fs" % (time() - t0)
################################################################################
# Train a SVM classification model
print "Fitting the classifier to the training set"
t0 = time()
param_grid = {
'C': [1, 5, 10, 50, 100],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
clf = GridSearchCV(SVC(kernel='rbf'), param_grid,
fit_params={'class_weight': 'auto'})
clf = clf.fit(X_train_pca, y_train)
print "done in %0.3fs" % (time() - t0)
print "Best estimator found by grid search:"
print clf.best_estimator
################################################################################
# Quantitative evaluation of the model quality on the test set
print "Predicting the people names on the testing set"
t0 = time()
y_pred = clf.predict(X_test_pca)
print "done in %0.3fs" % (time() - t0)
print classification_report(y_test, y_pred, target_names=target_names)
print confusion_matrix(y_test, y_pred, labels=range(n_classes))
################################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
pl.title(titles[i], size=12)
pl.xticks(())
pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
pl.show()
|
bsd-3-clause
|
xwolf12/scikit-learn
|
sklearn/calibration.py
|
137
|
18876
|
"""Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has already been
fitted and all data is used for calibration. Note that
data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoid (Platt's) calibration in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probabilities.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probabilities. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
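# A minimal usage sketch of the public objects defined above, assuming the
# module is importable through its usual `sklearn.calibration` path; the helper
# below is illustrative only and is never called.
def _usage_sketch():
    # make_classification is only used here to build a small synthetic binary
    # problem for the sketch.
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=500, random_state=0)
    # LinearSVC exposes decision_function but no predict_proba, so it benefits
    # from calibration; CalibratedClassifierCV fits and calibrates per fold.
    clf = CalibratedClassifierCV(LinearSVC(random_state=0),
                                 method='sigmoid', cv=3)
    clf.fit(X, y)
    prob_pos = clf.predict_proba(X)[:, 1]
    # Summarize calibration quality with the reliability curve defined above.
    return calibration_curve(y, prob_pos, n_bins=5)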
|
bsd-3-clause
|
mehdidc/fluentopt
|
benchmarks/coco.py
|
1
|
3877
|
"""
This benchmark example uses the coco benchmark set of functions
(<http://coco.gforge.inria.fr/>, <https://github.com/numbbo/coco>)
to compare optimizers provided by fluentopt between themselves and also
with CMA-ES[1].
To run these benchmarks, the package 'cocoex' must be installed,
check <https://github.com/numbbo/coco> to see how to install it.
Also, the package 'cma' is needed and can be installed by pip.
For each function, each algorithm is run for a number of independent trials
and the results are all written to a csv file (by default benchmark.csv).
Each row corresponds to a trial for a given algo and function.
The columns are:
- 'func' : function name (str)
- 'algo' : algo name (str)
- 'nbeval' : nb of evaluations performed (int)
- 'ybest' : the best output value found (float)
- 'duration' : duration in seconds (float)
[1] Nikolaus Hansen and Andreas Ostermeier, Completely derandomized
self-adaptation in evolution strategies.
Evolutionary computation, 9(2):159–195, 2001
"""
import time
import numpy as np
import pandas as pd
from cocoex import Suite, Observer
from fluentopt import BayesianOptimizer
from fluentopt.bayesianoptimizer import ucb_minimize
from fluentopt.transformers import Wrapper
from fluentopt import RandomSearch
from cma import fmin as cma_fmin
from cma import CMAEvolutionStrategy
from clize import run
def cma(fun, budget):
sigma0 = 0.02
range_ = fun.upper_bounds - fun.lower_bounds
center = fun.lower_bounds + range_ / 2
x0 = center
options = dict(
scaling=range_ / range_[0], maxfevals=budget, verb_log=0, verb_disp=1, verbose=1
)
es = CMAEvolutionStrategy(x0, sigma0 * range_[0], options)
res = es.optimize(fun).result()
xbest, ybest, nbeval, *rest = res
return xbest, ybest, nbeval
def ucb(fun, budget):
sampler = _uniform_sampler(low=fun.lower_bounds, high=fun.upper_bounds)
opt = BayesianOptimizer(sampler=sampler, score=ucb_minimize, nb_suggestions=100)
return _run_opt(opt, fun, budget)
def random_search(fun, budget):
sampler = _uniform_sampler(low=fun.lower_bounds, high=fun.upper_bounds)
opt = RandomSearch(sampler=sampler)
return _run_opt(opt, fun, budget)
def _uniform_sampler(low, high):
low = np.array(low)
high = np.array(high)
dim = len(low)
def sampler_(rng):
return rng.uniform(0, 1, size=dim) * (high - low) + low
return sampler_
def _run_opt(opt, feval, budget):
for _ in range(budget):
x = opt.suggest()
y = feval(x)
opt.update(x=x, y=y)
idx = np.argmin(opt.output_history_)
xbest = opt.input_history_[idx]
ybest = opt.output_history_[idx]
nbeval = budget
return xbest, ybest, nbeval
def main(nb_trials=15, budget_per_dim=100, output="benchmark.csv"):
suite_instance = "year:2016"
suite_name = "bbob"
suite_options = ""
suite = Suite(suite_name, suite_instance, suite_options)
algos = [random_search, cma, ucb]
stats = []
for i, fun in enumerate(suite):
print("Function {}".format(fun.name))
for algo in algos:
algo_name = algo.__name__
print('Algo : "{}"'.format(algo_name))
for trial in range(nb_trials):
print("Running trial {}...".format(trial + 1))
t0 = time.time()
xbest, ybest, nbeval = algo(fun, budget_per_dim * fun.dimension)
delta_t = time.time() - t0
stats.append(
{
"func": fun.id,
"algo": algo_name,
"nbeval": nbeval,
"ybest": ybest,
"duration": delta_t,
}
)
stats = pd.DataFrame(stats)
stats.to_csv(output, index=False)
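# A minimal post-processing sketch (illustrative only, never called here):
# summarize the CSV written by `main`, whose columns are documented in the
# module docstring above.
def summarize(csv_path="benchmark.csv"):
    """Print the mean best value and mean duration per (function, algorithm)."""
    df = pd.read_csv(csv_path)
    print(df.groupby(["func", "algo"])[["ybest", "duration"]].mean())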
if __name__ == "__main__":
run(main)
|
bsd-3-clause
|
jdemonasterio/Bayes-for-Hackers
|
Chapter2_MorePyMC/separation_plot.py
|
86
|
1494
|
# separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities; can be an n x M matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except IndexError:  # p is one-dimensional: treat it as a single model
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
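if __name__ == "__main__":
    # A minimal usage sketch with simulated data; the probabilities and
    # outcomes below are illustrative assumptions, not real model output.
    np.random.seed(0)
    p = np.random.uniform(0.0, 1.0, size=500)  # predicted probabilities
    y = np.random.binomial(1, p)               # 0/1 outcomes drawn from p
    separation_plot(p, y)
    plt.show()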
|
mit
|
rajat1994/scikit-learn
|
examples/calibration/plot_calibration.py
|
225
|
4795
|
"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
|
bsd-3-clause
|
wdwvt1/scikit-bio
|
skbio/stats/power.py
|
3
|
51457
|
r"""
Empirical Power Estimation (:mod:`skbio.stats.power`)
=====================================================
.. currentmodule:: skbio.stats.power
The purpose of this module is to provide empirical, post-hoc power estimation
of normally and non-normally distributed data. It also provides support to
subsample data to facilitate this analysis.
The underlying principle is based on subsampling and Monte Carlo simulation.
Assume that there is some set of populations, :math:`K_{1}, K_{2}, ... K_{n}`
which have some property, :math:`\mu` such that :math:`\mu_{1} \neq \mu_{2}
\neq ... \neq \mu_{n}`. For each of the populations, a sample, :math:`S` can be
drawn, with a parameter, :math:`x` where :math:`x \approx \mu` and for the
samples, we can use a test, :math:`f`, to show that :math:`x_{1} \neq x_{2}
\neq ... \neq x_{n}`.
Since we know that :math:`\mu_{1} \neq \mu_{2} \neq ... \neq \mu_{n}`,
we know we should reject the null hypothesis. If we fail to reject the null
hypothesis, we have committed a Type II error and our result is a false
negative. We can estimate the frequency of Type II errors at various sampling
depths by repeatedly subsampling the populations and observing how often we
see a false negative. If we repeat this several times for each subsampling
depth, and vary the depths we use, we can start to approximate a relationship
between the number of samples we use and the rate of false negatives, also
called the statistical power of the test.
To generate complete power curves from data which appears underpowered, the
`statsmodels.stats.power` package can be used to solve for an effect size. The
effect size can be used to extrapolate a power curve for the data.
Most functions in this module accept a statistical test function which takes a
list of samples and returns a p value. The test is then evaluated over a series
of subsamples.
Sampling may be handled in two ways. For any set of samples, we may simply
choose to draw :math:`n` observations at random for each sample. Alternatively,
if metadata is available, samples can be matched based on a set of control
categories so that paired samples are drawn at random from the set of available
matches.
Functions
---------
.. autosummary::
:toctree: generated/
subsample_power
subsample_paired_power
confidence_bound
paired_subsamples
bootstrap_power_curve
Examples
--------
Suppose we wanted to test that there's a relationship between two random
variables, `ind` and `dep`. Let's use random subsampling to estimate the
statistical power of our test with an alpha of 0.1, 0.01, and 0.001.
To control for the pseudo-random number generation, we will use a seed.
When using these functions with your own data, you don't need to include this
step.
>>> import numpy as np
>>> np.random.seed(20)
>>> ind = np.random.randint(0, 20, 15)
>>> ind
array([ 3, 15, 9, 11, 7, 2, 0, 8, 19, 16, 6, 6, 16, 9, 5])
>>> dep = (3 * ind + 5 + np.random.randn(15) * 5).round(3)
>>> dep
array([ 15.617, 47.533, 28.04 , 33.788, 19.602, 12.229, 4.779,
36.838, 67.256, 55.032, 22.157, 7.051, 58.601, 38.664,
18.783])
Let's define a test that will draw a list of sample pairs and determine
if they're correlated. We'll use `scipy.stats.pearsonr` which takes two arrays
and returns a correlation coefficient and a p-value representing the
probability the two distributions are correlated.
>>> from scipy.stats import pearsonr
>>> f = lambda x: pearsonr(x[0], x[1])[1]
Now, let's use random sampling to estimate the power of our test on
the first distribution.
>>> samples = [ind, dep]
>>> f(samples)
3.6459452596563003e-08
In `subsample_power`, we can maintain a paired relationship between samples
by setting `draw_mode` to "matched". We can also set our critical value, so
that we estimate power for a critical value of :math:`\alpha = 0.05`, an
estimate for the critical value of 0.01, and a critical value of 0.001.
>>> from skbio.stats.power import subsample_power
>>> pwr_100, counts_100 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.1)
>>> pwr_010, counts_010 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.01)
>>> pwr_001, counts_001 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.001)
>>> counts_100
array([3, 4, 5, 6, 7, 8, 9])
>>> pwr_100.mean(0)
array([ 0.4716, 0.8226, 0.9424, 0.986 , 0.9988, 1. , 1. ])
>>> pwr_010.mean(0)
array([ 0.0492, 0.2368, 0.5462, 0.823 , 0.9474, 0.9828, 0.9982])
>>> pwr_001.mean(0)
array([ 0.0028, 0.0174, 0.1262, 0.342 , 0.5928, 0.8256, 0.9594])
Based on this power estimate, as we increase our confidence that we have not
committed a type I error and identified a false positive, the number of samples
we need to be confident that we have not committed a type II error increases.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.utils import viewitems
from future.builtins import range
import collections
import copy
import numpy as np
import scipy.stats
import six
from skbio.util._decorator import experimental, deprecated
@experimental(as_of="0.4.0")
def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05, ratio=None,
max_counts=50, counts_interval=10, min_counts=None,
num_iter=500, num_runs=10):
r"""Subsamples data to iteratively calculate power
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
alpha_pwr : float, optional
The critical value used to calculate the power.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
max_counts : positive int, optional
The maximum number of samples per group to draw for effect size
calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
num_iter : positive int, optional
The number of p-values to generate for each point
on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
power : array
The power calculated for each subsample at each count. The array has
`num_runs` rows, a length with the same number of elements as
`sample_counts` and a depth equal to the number of p values returned by
`test`. If `test` returns a float, the returned array will be
two-dimensional instead of three.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratio`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
Examples
--------
Let's say we wanted to look at the relationship between the presence of a
specific bacteria, *Gardnerella vaginalis* in the vaginal community, and
the probability of a pre or post menopausal woman experiencing a urinary
tract infection (UTI). Healthy women were enrolled in the study either
before or after menopause, and followed for eight weeks. Participants
submitted fecal samples at the beginning of the study, and were then
followed for clinical symptoms of a UTI. A confirmed UTI was an endpoint
in the study.
Using available literature and 16S sequencing, a set of candidate taxa were
identified as correlated with UTIs, including *G. vaginalis*. In the 100
women (50 premenopausal and 50 postmenopausal samples) who had UTIs, the
presence or absence of *G. vaginalis* was confirmed with quantitative PCR.
We can model the probability that detectable *G. vaginalis* was found in
these samples using a binomial model. (*Note that this is a simulation.*)
>>> import numpy as np
>>> np.random.seed(25)
>>> pre_rate = np.random.binomial(1, 0.85, size=(50,))
>>> pre_rate.sum()
45
>>> pos_rate = np.random.binomial(1, 0.40, size=(50,))
>>> pos_rate.sum()
21
Let's set up a test function, so we can test the probability of
finding a difference in frequency between the two groups. We'll use
`scipy.stats.chisquare` to look for the difference in frequency between
groups.
>>> from scipy.stats import chisquare, nanmean
>>> test = lambda x: chisquare(np.array([x[i].sum() for i in
... xrange(len(x))]))[1]
Let's make sure that our two distributions are different.
>>> round(test([pre_rate, pos_rate]), 3)
0.003
Since there are an even number of samples, and we don't have enough
information to try controlling the data, we'll use
`skbio.stats.power.subsample_power` to compare the two groups. If we had
metadata about other risk factors, like a reproductive history, BMI,
tobacco use, we might want to use
`skbio.stats.power.subsample_paired_power`.
We'll also use "ind" `draw_mode`, since there is no linkage between the
two groups of samples.
>>> from skbio.stats.power import subsample_power
>>> pwr_est, counts = subsample_power(test=test,
... samples=[pre_rate, pos_rate],
... num_iter=100,
... num_runs=5,
... counts_interval=5)
>>> counts
array([ 5, 10, 15, 20, 25, 30, 35, 40, 45])
>>> nanmean(pwr_est, 0) # doctest: +NORMALIZE_WHITESPACE
array([ 0.056, 0.074, 0.226, 0.46 , 0.61 , 0.806, 0.952, 1. ,
1. ])
>>> counts[nanmean(pwr_est, 0) > 0.8].min()
30
So, we can estimate that we will see a significant difference in the
presence of *G. vaginalis* in the stool of pre and post women with UTIs if
we have at least 30 samples per group.
If we wanted to test the relationship of a second candidate taxa which is
more rare in the population, but may have a similar effect, based on
available literature, we might also start by trying to identify 30
samples per group where the second candidate taxa is present.
Suppose, now, that we want to test that a secondary metabolite seen only in
the presence of *G vaginalis* to see if it is also correlated with UTIs. We
can model the abundance of the metabolite as a normal distribution.
>>> met_pos = (np.random.randn(pre_rate.sum() + pos_rate.sum()) * 2000 +
... 2500)
>>> met_pos[met_pos < 0] = 0
>>> met_neg = (np.random.randn(100 - (pre_rate.sum() +
... pos_rate.sum())) * 2000 + 500)
>>> met_neg[met_neg < 0] = 0
Let's compare the populations with a kruskal-wallis test. Physically, there
cannot be a negative concentration of a chemical, so we've set the lower
bound at 0. This means that we can no longer assume our distribution is
normal.
>>> from scipy.stats import kruskal
>>> def metabolite_test(x):
... return kruskal(x[0], x[1])[1]
>>> round(metabolite_test([met_pos, met_neg]), 3)
0.005
When we go to perform the statistical test on all the data, you might
notice that there are twice as many samples from women with *G. vaginalis*
than those without. It might make sense to account for this difference when
we're testing power. So, we're going to set the `ratio` parameter, which
lets us draw twice as many samples from women with *G. vaginalis*.
>>> pwr_est2, counts2 = subsample_power(test=metabolite_test,
... samples=[met_pos, met_neg],
... counts_interval=5,
... num_iter=100,
... num_runs=5,
... ratio=[2, 1])
>>> counts2
array([ 5., 10., 15., 20., 25., 30.])
>>> nanmean(pwr_est2, 0)
array([ 0.14 , 0.272, 0.426, 0.646, 0.824, 0.996])
>>> counts2[nanmean(pwr_est2, 0) > 0.8].min()
25.0
When we consider the number of samples per group needed in the power
analysis, we need to look at the ratio. The analysis says that we need 25
samples in the smallest group, in this case, the group of women without
*G. vaginalis* and 50 samples from women with *G. vaginalis* to see a
significant difference in the abundance of our secondary metabolite at 80%
power.
"""
# Checks the inputs
ratio, num_p, sample_counts = \
_check_subsample_power_inputs(test=test,
samples=samples,
draw_mode=draw_mode,
ratio=ratio,
min_counts=min_counts,
max_counts=max_counts,
counts_interval=counts_interval)
# Preallocates the power array
power = np.zeros((num_runs, len(sample_counts), num_p))
# Calculates the power instances
for id2, c in enumerate(sample_counts):
count = np.round(c * ratio, 0).astype(int)
for id1 in range(num_runs):
ps = _compare_distributions(test=test,
samples=samples,
num_p=num_p,
counts=count,
num_iter=num_iter,
mode=draw_mode)
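# Power for this subsampling depth and run is the fraction of the `num_iter`
# p-values in `ps` that fall below `alpha_pwr`; this is what the private
# helper `_calculate_power` computes.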
power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
power = power.squeeze()
return power, sample_counts
@experimental(as_of="0.4.0")
def subsample_paired_power(test, meta, cat, control_cats, order=None,
strict_match=True, alpha_pwr=0.05,
max_counts=50, counts_interval=10, min_counts=None,
num_iter=500, num_runs=10):
r"""Estimates power iteratively using samples with matching metadata
Parameters
----------
test : function
The statistical test which accepts a list of arrays sample ids and
returns a p value.
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str
The metadata category being varied between samples.
control_cats : list
The metadata categories to be used as controls. For example, if
you wanted to vary age (`cat` = "AGE"), you might want to control
for gender and health status (i.e. `control_cats` = ["SEX",
"HEALTHY"]).
order : list, optional
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match : bool, optional
This determines how data is grouped using
`control_cats`. If a sample within `meta` has an undefined value (NaN)
for any of the columns in `control_cats`, the sample will not be
considered as having a match and will be ignored when `strict_match`
is True. If `strict_match` is False, missing values (NaN) in the
`control_cats` can be considered matches.
alpha_pwr : float, optional
The critical value used to calculate the power.
max_counts : positive int, optional
The maximum number of observations per sample to draw
for effect size calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
num_iter : positive int, optional
The number of p-values to generate for each point on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
power : array
The power calculated for each subsample at each count. The array is
`num_runs` rows, a length with the same number of elements as
`sample_counts` and a depth equal to the number of p values returned by
`test`. If `test` returns a float, the returned array will be
two-dimensional instead of three.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
Examples
--------
Assume you are interested in the role of a specific cytokine of protein
translocation in myeloid-lineage cells. You are able to culture two
macrophage lineages (bone marrow derived phagocytes and
peritoneally-derived macrophages). Due to unfortunate circumstances, your
growth media must be acquired from multiple sources (lab, company A,
company B). Also unfortunate, you must use labor-intensive low throughput
assays. You have some preliminary measurements, and you'd like to
predict how many (more) cells you need to analyze for 80% power.
You have information about 60 cells, which we'll simulate below. Note
that we are setting a random seed value for consistency.
>>> import numpy as np
>>> import pandas as pd
>>> np.random.seed(25)
>>> data = pd.DataFrame.from_dict({
... 'CELL_LINE': np.random.binomial(1, 0.5, size=(60,)),
... 'SOURCE': np.random.binomial(2, 0.33, size=(60,)),
... 'TREATMENT': np.hstack((np.zeros((30)), np.ones((30)))),
... 'INCUBATOR': np.random.binomial(1, 0.2, size=(60,))})
>>> data['OUTCOME'] = (0.25 + data.TREATMENT * 0.25) + \
... np.random.randn(60) * (0.1 + data.SOURCE/10 + data.CELL_LINE/5)
>>> data.loc[data.OUTCOME < 0, 'OUTCOME'] = 0
>>> data.loc[data.OUTCOME > 1, 'OUTCOME'] = 1
We will approach this by assuming that the distribution of our outcome is
not normally distributed, and apply a kruskal-wallis test to compare
between the cytokine treated and untreated cells.
>>> from scipy.stats import kruskal
>>> f = lambda x: kruskal(*[data.loc[i, 'OUTCOME'] for i in x])[1]
Let's check that cytokine treatment has a significant effect across all
the cells.
>>> treatment_stat = [g for g in data.groupby('TREATMENT').groups.values()]
>>> f(treatment_stat)
0.0019386336266250209
Now, let's pick the control categories. It seems reasonable to assume there
may be an effect of cell line on the treatment outcome, which may be
attributed to differences in receptor expression. It may also be possible
that there are differences due to cytokine source. Incubators were maintained
under the same conditions throughout the experiment, within one degree of
temperature difference at any given time, and the same level of CO2.
So, at least initially, let's ignore differences due to the incubator.
It's recommended that as a first pass analysis, control variables be
selected based on an idea of what may be biologically relevant to the
system, although further iteration might encourage the consideration of
variables with effect sizes similar to, or larger than, the variable of
interest.
>>> control_cats = ['SOURCE', 'CELL_LINE']
>>> from skbio.stats.power import subsample_paired_power
>>> pwr, cnt = subsample_paired_power(test=f,
... meta=data,
... cat='TREATMENT',
... control_cats=control_cats,
... counts_interval=5,
... num_iter=100,
... num_runs=5)
>>> cnt
array([ 5., 10., 15., 20.])
>>> pwr.mean(0)
array([ 0.196, 0.356, 0.642, 0.87 ])
>>> pwr.std(0).round(3)
array([ 0.019, 0.021, 0.044, 0.026])
Estimating off the power curve, it looks like 20 cells per group may
provide adequate power for this experiment, although the large variance
in power might suggest extending the curves or increasing the number of
samples per group.
"""
# Handles the order argument
if order is None:
order = sorted(meta.groupby(cat).groups.keys())
order = np.array(order)
# Checks for the number of sampling pairs available
meta_pairs, index = _identify_sample_groups(meta, cat, control_cats, order,
strict_match)
min_obs = min([_get_min_size(meta, cat, control_cats, order, strict_match),
np.floor(len(index)*0.9)])
sub_ids = _draw_paired_samples(meta_pairs, index, min_obs)
ratio, num_p, sample_counts = \
_check_subsample_power_inputs(test=test,
samples=sub_ids,
draw_mode='matched',
min_counts=min_counts,
max_counts=max_counts,
counts_interval=counts_interval)
# Preallocates the power array
power = np.zeros((num_runs, len(sample_counts), num_p))
# Calculates power instances
for id2, c in enumerate(sample_counts):
for id1 in range(num_runs):
ps = np.zeros((num_p, num_iter))
for id3 in range(num_iter):
subs = _draw_paired_samples(meta_pairs, index, c)
ps[:, id3] = test(subs)
power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
power = power.squeeze()
return power, sample_counts
@experimental(as_of="0.4.0")
def confidence_bound(vec, alpha=0.05, df=None, axis=None):
r"""Calculates a confidence bound assuming a normal distribution
Parameters
----------
vec : array_like
The array of values to use in the bound calculation.
alpha : float, optional
The critical value, used for the confidence bound calculation.
df : float, optional
The degrees of freedom associated with the
distribution. If None is given, df is assumed to be the number of
        elements in the specified axis.
axis : positive int, optional
The axis over which to take the deviation. When axis
is None, a single value will be calculated for the whole matrix.
Returns
-------
bound : float
The confidence bound around the mean. The confidence interval is
[mean - bound, mean + bound].
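    Examples
    --------
    A minimal sketch of typical usage; the exact value depends on the data
    supplied, so no output is checked here.
    >>> import numpy as np
    >>> vec = np.array([2.0, 3.0, 4.0, 5.0, 6.0])
    >>> confidence_bound(vec, alpha=0.05)  # doctest: +SKIP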
"""
# Determines the number of non-nan counts
vec = np.asarray(vec)
vec_shape = vec.shape
if axis is None and len(vec_shape) == 1:
num_counts = vec_shape[0] - np.isnan(vec).sum()
elif axis is None:
num_counts = vec_shape[0] * vec_shape[1] - np.isnan(vec).sum()
else:
num_counts = vec_shape[axis] - np.isnan(vec).sum() / \
(vec_shape[0] * vec_shape[1])
# Gets the df if not supplied
if df is None:
df = num_counts - 1
# Calculates the bound
bound = scipy.stats.nanstd(vec, axis=axis) / np.sqrt(num_counts - 1) * \
scipy.stats.t.ppf(1 - alpha / 2, df)
return bound
bootstrap_power_curve_deprecation_reason = (
"Please use skbio.stats.power.subsample_power or "
"skbio.stats.power.subsample_paired_power followed by "
"confidence_bound.")
@deprecated(as_of="0.2.3-dev", until="0.4.1",
reason=bootstrap_power_curve_deprecation_reason)
def bootstrap_power_curve(test, samples, sample_counts, ratio=None,
alpha=0.05, mode='ind', num_iter=500, num_runs=10):
r"""Repeatedly calculates the power curve for a specified alpha level
Parameters
----------
test : function
The statistical test which accepts an array_like of sample ids
(list of lists or arrays) and returns a p-value.
samples : array_like
samples can be a list of lists or an array where each sublist or row in
the array corresponds to a sampled group.
sample_counts : 1-D array_like
A vector of the number of samples which should be sampled in each curve
    ratio : 1-D array_like, optional
        The fraction of the sample counts which should be assigned to each
        group. This must be None, or an array the same length as `samples`.
        If `ratio` is None, the same number of observations is drawn from
        each sample.
alpha : float, optional
The default is 0.05. The critical value for calculating power.
mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
num_iter : positive int, optional
The number of p-values to generate for each point on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
    power_mean : 1-D array
        The mean power calculated across the runs.
    power_bound : 1-D array
        The confidence bound around the mean power.
Examples
--------
    Suppose we have 100 samples randomly drawn from two normal distributions,
    the first with mean 0 and standard deviation 1, and the second with mean 1
    and standard deviation 1.5.
>>> import numpy as np
>>> np.random.seed(20)
>>> samples_1 = np.random.randn(100)
>>> samples_2 = 1.5 * np.random.randn(100) + 1
We want to test the statistical power of an independent two sample t-test
comparing the two populations. We can define an anonymous function, `f`,
to wrap the scipy function for independent t tests,
`scipy.stats.ttest_ind`. The test function will take a list of value
vectors and return a p value.
>>> from scipy.stats import ttest_ind
>>> f = lambda x: ttest_ind(x[0], x[1])[1]
    Now, we can determine the statistical power, or the probability of
    correctly rejecting the null hypothesis when it is false, across a range
    of subsample sizes.
>>> from skbio.stats.power import bootstrap_power_curve
>>> sample_counts = np.arange(5, 80, 5)
>>> power_mean, power_bound = bootstrap_power_curve(f,
... [samples_1, samples_2],
... sample_counts)
>>> sample_counts[power_mean - power_bound.round(3) > .80].min()
20
Based on this analysis, it looks like we need at least 20 observations
from each distribution to avoid committing a type II error more than 20%
of the time.
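    Since this function is deprecated, an equivalent curve can be sketched
    with the replacement suggested in the deprecation message. This is only a
    sketch: it assumes `subsample_power` accepts the same test, samples, and
    count arguments (output omitted, since the values depend on the random
    draws).
    >>> from skbio.stats.power import subsample_power, confidence_bound
    >>> power, counts = subsample_power(f, [samples_1, samples_2],
    ...                                 min_counts=5, max_counts=80,
    ...                                 counts_interval=5)  # doctest: +SKIP
    >>> power.mean(0) - confidence_bound(power, axis=0)  # doctest: +SKIP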
"""
    # Expands alpha into a vector with one value per run
    alpha = np.ones((num_runs)) * alpha
    # Bootstraps the power curve
power = _calculate_power_curve(test=test,
samples=samples,
sample_counts=sample_counts,
ratio=ratio,
num_iter=num_iter,
alpha=alpha,
mode=mode)
# Calculates two summary statistics
power_mean = power.mean(0)
power_bound = confidence_bound(power, alpha=alpha[0], axis=0)
return power_mean, power_bound
@experimental(as_of="0.4.0")
def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
r"""Draws a list of samples varied by `cat` and matched for `control_cats`
This function is designed to provide controlled samples, based on a
metadata category. For example, one could control for age, sex, education
level, and diet type while measuring exercise frequency.
Parameters
----------
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str, list
The metadata category (or a list of categories) for comparison.
control_cats : list
The metadata categories to be used as controls. For example, if you
wanted to vary age (`cat` = "AGE"), you might want to control for
gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
order : list, optional
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match: bool, optional
This determines how data is grouped using `control_cats`. If a sample
within `meta` has an undefined value (`NaN`) for any of the columns in
`control_cats`, the sample will not be considered as having a match and
will be ignored when `strict_match` is True. If `strict_match` is
False, missing values (NaN) in the `control_cats` can be considered
matches.
Returns
-------
ids : array
a set of ids which satisfy the criteria. These are not grouped by
`cat`. An empty array indicates there are no sample ids which satisfy
the requirements.
Examples
--------
If we have a mapping file for a set of random individuals looking at
housing, sex, age and antibiotic use.
>>> import pandas as pd
>>> import numpy as np
>>> meta = {'SW': {'HOUSING': '2', 'SEX': 'M', 'AGE': np.nan, 'ABX': 'Y'},
... 'TS': {'HOUSING': '2', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
... 'CB': {'HOUSING': '3', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
... 'BB': {'HOUSING': '1', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'}}
>>> meta = pd.DataFrame.from_dict(meta, orient="index")
>>> meta #doctest: +SKIP
ABX HOUSING AGE SEX
BB Y 1 40s M
CB Y 3 40s M
SW Y 2 NaN M
TS Y 2 40s M
We may want to vary an individual's housing situation, while holding
constant their age, sex and antibiotic use so we can estimate the effect
size for housing, and later compare it to the effects of other variables.
>>> from skbio.stats.power import paired_subsamples
>>> ids = paired_subsamples(meta, 'HOUSING', ['SEX', 'AGE', 'ABX'])
>>> np.hstack(ids) #doctest: +ELLIPSIS
array(['BB', 'TS', 'CB']...)
So, for this set of data, we can match TS, CB, and BB based on their age,
sex, and antibiotic use. SW cannot be matched in either group because
`strict_match` was true, and there is missing AGE data for this sample.
"""
# Handles the order argument
if order is None:
order = sorted(meta.groupby(cat).groups.keys())
order = np.array(order)
# Checks the groups in the category
min_obs = _get_min_size(meta, cat, control_cats, order, strict_match)
# Identifies all possible subsamples
meta_pairs, index = _identify_sample_groups(meta=meta,
cat=cat,
control_cats=control_cats,
order=order,
strict_match=strict_match)
# Draws paired ids
ids = _draw_paired_samples(meta_pairs=meta_pairs,
index=index,
num_samps=min_obs)
return ids
def _get_min_size(meta, cat, control_cats, order, strict_match):
"""Determines the smallest group represented"""
if strict_match:
all_cats = copy.deepcopy(control_cats)
all_cats.append(cat)
meta = meta[all_cats].dropna()
return meta.groupby(cat).count().loc[order, control_cats[0]].min()
def _check_nans(x, switch=False):
    r"""Returns False if x is a NaN and True if x is a string or number
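    A tiny sketch of the expected behavior, inferred from the checks below
    (strings pass, bare NaN values do not):
    >>> _check_nans('40s')
    True
    >>> _check_nans(np.nan)
    False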
"""
if isinstance(x, six.string_types):
return True
elif isinstance(x, (float, int)):
return not np.isnan(x)
elif switch and isinstance(x, (list, tuple)) and np.nan in x:
return False
elif switch and isinstance(x, (list, tuple)):
return True
else:
raise TypeError('input must be a string, float or a nan')
def _calculate_power(p_values, alpha=0.05):
r"""Calculates statistical power empirically
Parameters
----------
p_values : 1-D array
A 1-D numpy array with the test results.
alpha : float
The critical value for the power calculation.
Returns
-------
power : float
The empirical power, or the fraction of observed p values below the
critical value.
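    Examples
    --------
    A small sketch: two of the four p-values below fall under the default
    critical value of 0.05, so the empirical power should be 0.5. The output
    is not checked here because it assumes true division.
    >>> import numpy as np
    >>> _calculate_power(np.array([0.01, 0.2, 0.03, 0.5]))  # doctest: +SKIP
    array([ 0.5])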
"""
p_values = np.atleast_2d(p_values)
w = (p_values < alpha).sum(axis=1)/p_values.shape[1]
return w
def _compare_distributions(test, samples, num_p, counts=5, mode="ind",
num_iter=100):
r"""Compares two distribution arrays iteratively
Parameters
----------
test : function
The statistical test which accepts an array_like of sample ids
(list of lists) and returns a p-value. This can be a one-dimensional
array, or a float.
samples : list of arrays
A list where each 1-d array represents a sample. If `mode` is
"matched", there must be an equal number of observations in each
sample.
    num_p : positive int
The number of p-values returned by the test.
counts : positive int or 1-D array, optional
The number of samples to draw from each distribution.
If this is a 1-D array, the length must correspond to the number of
samples. The function will not draw more observations than are in a
sample. In "matched" `mode`, the same number of observations will be
drawn from each group.
    mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
num_iter : positive int, optional
        Default 100. The number of p-values to generate for each point on the
curve.
Returns
-------
p_values : array
The p-values for the subsampled tests. If `test` returned a single
p value, p_values is a one-dimensional array. If `test` returned an
array, `p_values` has dimensions `num_iter` x `num_p`
Raises
------
ValueError
If mode is not "ind" or "matched".
ValueError
If the arrays in samples are not the same length in "matched" mode.
ValueError
If counts is a 1-D array and counts and samples are different lengths.
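    Examples
    --------
    A hedged sketch with an independent two-sample t-test; the p-values vary
    with the random subsampling, so no output is checked here.
    >>> import numpy as np
    >>> from scipy.stats import ttest_ind
    >>> np.random.seed(20)
    >>> samples = [np.random.randn(30), np.random.randn(30) + 1]
    >>> f = lambda x: ttest_ind(x[0], x[1])[1]
    >>> _compare_distributions(f, samples, num_p=1, counts=10,
    ...                        num_iter=50)  # doctest: +SKIP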
"""
    # Preallocates the p-value matrix
p_values = np.zeros((num_p, num_iter))
# Determines the number of samples per group
num_groups = len(samples)
samp_lens = [len(sample) for sample in samples]
if isinstance(counts, int):
counts = np.array([counts] * num_groups)
for idx in range(num_iter):
if mode == "matched":
pos = np.random.choice(np.arange(0, samp_lens[0]), counts[0],
replace=False)
subs = [sample[pos] for sample in samples]
else:
subs = [np.random.choice(np.array(pop), counts[i], replace=False)
for i, pop in enumerate(samples)]
p_values[:, idx] = test(subs)
if num_p == 1:
p_values = p_values.squeeze()
return p_values
def _check_subsample_power_inputs(test, samples, draw_mode='ind', ratio=None,
max_counts=50, counts_interval=10,
min_counts=None):
r"""Makes sure that everything is sane before power calculations
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
max_counts : positive int, optional
The maximum number of samples per group to draw for effect size
calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
Returns
-------
ratio : 1-D array
The fraction of the sample counts which should be assigned to each
group.
num_p : positive integer
The number of p values returned by `test`.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
"""
if draw_mode not in {'ind', 'matched'}:
raise ValueError('mode must be "matched" or "ind".')
# Determines the minimum number of ids in a category
id_counts = np.array([len(id_) for id_ in samples])
num_ids = id_counts.min()
# Determines the number of groups
num_groups = len(samples)
# Checks that "matched" mode is handled appropriately
if draw_mode == "matched":
for id_ in samples:
if not len(id_) == num_ids:
raise ValueError('Each vector in samples must be the same '
'length in "matched" draw_mode.')
# Checks the number of counts is appropriate
if min_counts is None:
min_counts = counts_interval
if (max_counts - min_counts) < counts_interval:
raise ValueError("No subsamples of the specified size can be drawn.")
# Checks the ratio argument is sane
if ratio is None or draw_mode == 'matched':
ratio = np.ones((num_groups))
else:
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
ratio_counts = np.array([id_counts[i] / ratio[i]
for i in range(num_groups)])
largest = ratio_counts.min()
# Determines the number of p values returned by the test
p_return = test(samples)
if isinstance(p_return, float):
num_p = 1
elif isinstance(p_return, np.ndarray) and len(p_return.shape) == 1:
num_p = p_return.shape[0]
else:
raise TypeError('test must return a float or one-dimensional array.')
    # Calculates the sample counts
sample_counts = np.arange(min_counts,
min(max_counts, largest),
counts_interval)
return ratio, num_p, sample_counts
def _identify_sample_groups(meta, cat, control_cats, order, strict_match):
"""Aggregates samples matches for `control_cats` that vary by `cat`
Parameters
----------
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str, list
The metadata category (or a list of categories) for comparison.
control_cats : list
The metadata categories to be used as controls. For example, if you
wanted to vary age (`cat` = "AGE"), you might want to control for
gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
order : list
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match: bool, optional
This determines how data is grouped using `control_cats`. If a sample
within `meta` has an undefined value (`NaN`) for any of the columns in
`control_cats`, the sample will not be considered as having a match and
will be ignored when `strict_match` is True. If `strict_match` is
False, missing values (NaN) in the `control_cats` can be considered
matches.
Returns
-------
meta_pairs : dict
Describes the categories matched for metadata. The
`control_cat`-grouped samples are numbered, corresponding to the
second list in `index`. The group is keyed to the list of sample arrays
with the same length of `order`.
index : list
A list of numpy arrays describing the positions of samples to be drawn.
The first array is an index array. The second gives an integer
corresponding to the `control_cat`-group, and the third lists the
position of the reference group sample in the list of samples.
"""
# Sets up variables to be filled
meta_pairs = {}
index = []
i1 = 0
# Groups the data by the control groups
ctrl_groups = meta.groupby(control_cats).groups
# Identifies the samples that satisfy the control pairs
for (g, ids) in viewitems(ctrl_groups):
        # If strict_match, skips over data that has NaNs
if not _check_nans(g, switch=True) and strict_match:
continue
# Draws the samples that are matched for control cats
m_ids = meta.loc[ids].groupby(cat).groups
        # Checks whether samples from each of the cat groups are represented
        # in these samples
        id_vecs = [m_ids[o] for o in order if o in m_ids]
        # If all groups are represented, the index and results are retained
        if len(id_vecs) == len(order):
min_vec = np.array([len(v) for v in id_vecs])
loc_vec = np.arange(0, min_vec.min())
meta_pairs[i1] = id_vecs
index.append(np.zeros(loc_vec.shape) + i1)
i1 = i1 + 1
# If the groups are not represented, an empty array gets passed
else:
index.append(np.array([]))
# Converts index to a 1d array
index = np.hstack(index)
    # If index is empty, sets up meta_pairs with a 'no' key.
if not meta_pairs:
meta_pairs['no'] = order
return meta_pairs, index
def _draw_paired_samples(meta_pairs, index, num_samps):
"""Draws a random set of ids from a matched list
Parameters
----------
meta_pairs : dict
Describes the categories matched for metadata. The
`control_cat`-grouped samples are numbered, corresponding to the
second list in `index`. The group is keyed to the list of sample arrays
with the same length of `order`.
index : list
A list of numpy arrays describing the positions of samples to be drawn.
The first array is an index array. The second gives an integer
corresponding to the `control_cat`-group, and the third lists the
position of the reference group sample in the list of samples.
Returns
-------
    ids : list
        A list of arrays of randomly selected ids, one array per group.
"""
# Handles an empty paired vector
if 'no' in meta_pairs:
return [np.array([]) for o in meta_pairs['no']]
# Identifies the absolute positions of the control group being drawn
set_pos = np.random.choice(index, int(num_samps),
replace=False).astype(int)
subs = []
# Draws the other groups
for set_, num_ in viewitems(collections.Counter(set_pos)):
r2 = [np.random.choice(col, num_, replace=False) for col in
meta_pairs[set_]]
subs.append(r2)
ids = [np.hstack(ids) for ids in zip(*subs)]
return ids
def _calculate_power_curve(test, samples, sample_counts, ratio=None,
mode='ind', num_iter=1000, alpha=0.05):
r"""Generates an empirical power curve for the samples.
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values and
returns a p value.
samples : array_like
`samples` can be a list of lists or an array where each sublist or row
in the array corresponds to a sampled group.
sample_counts : 1-D array
A vector of the number of samples which should be sampled in each
curve.
mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample.
num_iter : int
The default is 1000. The number of p-values to generate for each point
on the curve.
Returns
-------
    power : array
        The power calculated for each sample count and critical value.
Raises
------
ValueError
If ratio is an array and ratio is not the same length as samples
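    Examples
    --------
    A hedged sketch using an independent two-sample t-test; the resulting
    power values vary with the random draws, so no output is checked here.
    >>> import numpy as np
    >>> from scipy.stats import ttest_ind
    >>> np.random.seed(20)
    >>> samples = [np.random.randn(50), np.random.randn(50) + 1]
    >>> f = lambda x: ttest_ind(x[0], x[1])[1]
    >>> _calculate_power_curve(f, samples, [10, 20, 30],
    ...                        num_iter=100)  # doctest: +SKIP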
"""
# Casts array-likes to arrays
sample_counts = np.asarray(sample_counts)
# Determines the number of groups
num_groups = len(samples)
num_samps = len(sample_counts)
if isinstance(alpha, float):
vec = True
pwr = np.zeros((num_samps))
alpha = np.array([alpha])
else:
vec = False
num_crit = alpha.shape[0]
pwr = np.zeros((num_crit, num_samps))
# Checks the ratio argument
if ratio is None:
ratio = np.ones((num_groups))
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
# Loops through the sample sizes
for id2, s in enumerate(sample_counts):
count = np.round(s * ratio, 0).astype(int)
for id1, a in enumerate(alpha):
ps = _compare_distributions(test=test,
samples=samples,
counts=count,
num_p=1,
num_iter=num_iter,
mode=mode)
if vec:
pwr[id2] = _calculate_power(ps, a)
else:
pwr[id1, id2] = _calculate_power(ps, a)
return pwr
|
bsd-3-clause
|
cloud-fan/spark
|
python/pyspark/pandas/tests/test_groupby.py
|
14
|
118068
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError, DataError
from pyspark.pandas.missing.groupby import (
MissingPandasLikeDataFrameGroupBy,
MissingPandasLikeSeriesGroupBy,
)
from pyspark.pandas.groupby import is_multi_agg_with_relabel
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class GroupByTest(PandasOnSparkTestCase, TestUtils):
def test_groupby_simple(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
psdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("a").reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index).sum()),
sort(pdf.groupby("a", as_index=as_index).sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index).b.sum()),
sort(pdf.groupby("a", as_index=as_index).b.sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index)["b"].sum()),
sort(pdf.groupby("a", as_index=as_index)["b"].sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
sort(pdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index)[[]].sum()),
sort(pdf.groupby("a", as_index=as_index)[[]].sum()),
)
self.assert_eq(
sort(psdf.groupby("a", as_index=as_index)["c"].sum()),
sort(pdf.groupby("a", as_index=as_index)["c"].sum()),
)
self.assert_eq(
psdf.groupby("a").a.sum().sort_index(), pdf.groupby("a").a.sum().sort_index()
)
self.assert_eq(
psdf.groupby("a")["a"].sum().sort_index(), pdf.groupby("a")["a"].sum().sort_index()
)
self.assert_eq(
psdf.groupby("a")[["a"]].sum().sort_index(), pdf.groupby("a")[["a"]].sum().sort_index()
)
self.assert_eq(
psdf.groupby("a")[["a", "c"]].sum().sort_index(),
pdf.groupby("a")[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b).sum().sort_index(), pdf.a.groupby(pdf.b).sum().sort_index()
)
for axis in [0, "index"]:
self.assert_eq(
psdf.groupby("a", axis=axis).a.sum().sort_index(),
pdf.groupby("a", axis=axis).a.sum().sort_index(),
)
self.assert_eq(
psdf.groupby("a", axis=axis)["a"].sum().sort_index(),
pdf.groupby("a", axis=axis)["a"].sum().sort_index(),
)
self.assert_eq(
psdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
)
self.assert_eq(
psdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b, axis=axis).sum().sort_index(),
pdf.a.groupby(pdf.b, axis=axis).sum().sort_index(),
)
self.assertRaises(ValueError, lambda: psdf.groupby("a", as_index=False).a)
self.assertRaises(ValueError, lambda: psdf.groupby("a", as_index=False)["a"])
self.assertRaises(ValueError, lambda: psdf.groupby("a", as_index=False)[["a"]])
self.assertRaises(ValueError, lambda: psdf.groupby("a", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: psdf.groupby("z", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: psdf.groupby(["z"], as_index=False)[["a", "c"]])
self.assertRaises(TypeError, lambda: psdf.a.groupby(psdf.b, as_index=False))
self.assertRaises(NotImplementedError, lambda: psdf.groupby("a", axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.groupby("a", axis="columns"))
self.assertRaises(ValueError, lambda: psdf.groupby("a", "b"))
self.assertRaises(TypeError, lambda: psdf.a.groupby(psdf.a, psdf.b))
# we can't use column name/names as a parameter `by` for `SeriesGroupBy`.
self.assertRaises(KeyError, lambda: psdf.a.groupby(by="a"))
self.assertRaises(KeyError, lambda: psdf.a.groupby(by=["a", "b"]))
self.assertRaises(KeyError, lambda: psdf.a.groupby(by=("a", "b")))
# we can't use DataFrame as a parameter `by` for `DataFrameGroupBy`/`SeriesGroupBy`.
self.assertRaises(ValueError, lambda: psdf.groupby(psdf))
self.assertRaises(ValueError, lambda: psdf.a.groupby(psdf))
self.assertRaises(ValueError, lambda: psdf.a.groupby((psdf,)))
# non-string names
pdf = pd.DataFrame(
{
10: [1, 2, 6, 4, 4, 6, 4, 3, 7],
20: [4, 2, 7, 3, 3, 1, 1, 1, 2],
30: [4, 2, 7, 3, None, 1, 1, 1, 2],
40: list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
psdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(10).reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby(10, as_index=as_index).sum()),
sort(pdf.groupby(10, as_index=as_index).sum()),
)
self.assert_eq(
sort(psdf.groupby(10, as_index=as_index)[20].sum()),
sort(pdf.groupby(10, as_index=as_index)[20].sum()),
)
self.assert_eq(
sort(psdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
sort(pdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
)
def test_groupby_multiindex_columns(self):
pdf = pd.DataFrame(
{
(10, "a"): [1, 2, 6, 4, 4, 6, 4, 3, 7],
(10, "b"): [4, 2, 7, 3, 3, 1, 1, 1, 2],
(20, "c"): [4, 2, 7, 3, None, 1, 1, 1, 2],
(30, "d"): list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby((10, "a")).sum().sort_index(), pdf.groupby((10, "a")).sum().sort_index()
)
self.assert_eq(
psdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
pdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
)
self.assert_eq(
psdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
pdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
)
# TODO: a pandas bug?
# expected = pdf.groupby((10, "a"))[(20, "c")].sum().sort_index()
expected = pd.Series(
[4.0, 2.0, 1.0, 4.0, 8.0, 2.0],
name=(20, "c"),
index=pd.Index([1, 2, 3, 4, 6, 7], name=(10, "a")),
)
self.assert_eq(psdf.groupby((10, "a"))[(20, "c")].sum().sort_index(), expected)
if (
LooseVersion(pd.__version__) >= LooseVersion("1.0.4")
and LooseVersion(pd.__version__) != LooseVersion("1.1.3")
and LooseVersion(pd.__version__) != LooseVersion("1.1.4")
):
self.assert_eq(
psdf[(20, "c")].groupby(psdf[(10, "a")]).sum().sort_index(),
pdf[(20, "c")].groupby(pdf[(10, "a")]).sum().sort_index(),
)
else:
# Due to pandas bugs resolved in 1.0.4, re-introduced in 1.1.3 and resolved in 1.1.5
self.assert_eq(psdf[(20, "c")].groupby(psdf[(10, "a")]).sum().sort_index(), expected)
def test_split_apply_combine_on_series(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
psdf = ps.from_pandas(pdf)
funcs = [
((True, False), ["sum", "min", "max", "count", "first", "last"]),
((True, True), ["mean"]),
((False, False), ["var", "std"]),
]
funcs = [(check_exact, almost, f) for (check_exact, almost), fs in funcs for f in fs]
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for check_exact, almost, func in funcs:
for kkey, pkey in [("b", "b"), (psdf.b, pdf.b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
if as_index is True or func != "std":
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
else:
                            # seems like a pandas bug for as_index=False and func == "std"?
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index).a, func)()),
sort(pdf.groupby(pkey, as_index=True).a.std().reset_index()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index), func)()),
sort(pdf.groupby(pkey, as_index=True).std().reset_index()),
check_exact=check_exact,
almost=almost,
)
for kkey, pkey in [(psdf.b + 1, pdf.b + 1), (psdf.copy().b, pdf.copy().b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(psdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for i in [0, 4, 7]:
with self.subTest(as_index=as_index, func=func, i=i):
self.assert_eq(
sort(getattr(psdf.groupby(psdf.b > i, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(psdf.groupby(psdf.b > i, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for kkey, pkey in [
(psdf.b, pdf.b),
(psdf.b + 1, pdf.b + 1),
(psdf.copy().b, pdf.copy().b),
(psdf.b.rename(), pdf.b.rename()),
]:
with self.subTest(func=func, key=pkey):
self.assert_eq(
getattr(psdf.a.groupby(kkey), func)().sort_index(),
getattr(pdf.a.groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((psdf.a + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.a + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((psdf.b + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.b + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr(psdf.a.rename().groupby(kkey), func)().sort_index(),
getattr(pdf.a.rename().groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
def test_aggregate(self):
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
psdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for kkey, pkey in [("A", "A"), (psdf.A, pdf.A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
psdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
if as_index:
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
else:
                        # seems like a pandas bug for as_index=False and func_or_funcs is list?
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=True).agg(["sum"]).reset_index()),
)
for kkey, pkey in [(psdf.A + 1, pdf.A + 1), (psdf.copy().A, pdf.copy().A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
psdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
self.assert_eq(
sort(psdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
expected_error_message = (
r"aggs must be a dict mapping from column name to aggregate functions "
r"\(string or list of strings\)."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.groupby("A", as_index=as_index).agg(0)
# multi-index columns
columns = pd.MultiIndex.from_tuples([(10, "A"), (10, "B"), (20, "C")])
pdf.columns = columns
psdf.columns = columns
for as_index in [True, False]:
stats_psdf = psdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
stats_pdf = pdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
self.assert_eq(
stats_psdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
stats_pdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
)
stats_psdf = psdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
stats_pdf = pdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
self.assert_eq(
stats_psdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
stats_pdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
)
# non-string names
pdf.columns = [10, 20, 30]
psdf.columns = [10, 20, 30]
for as_index in [True, False]:
stats_psdf = psdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
stats_pdf = pdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
self.assert_eq(
stats_psdf.sort_values(by=[20, 30]).reset_index(drop=True),
stats_pdf.sort_values(by=[20, 30]).reset_index(drop=True),
)
stats_psdf = psdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
stats_pdf = pdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
self.assert_eq(
stats_psdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
stats_pdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
)
def test_aggregate_func_str_list(self):
        # this tests cases where only a string or a list is assigned
pdf = pd.DataFrame(
{
"kind": ["cat", "dog", "cat", "dog"],
"height": [9.1, 6.0, 9.5, 34.0],
"weight": [7.9, 7.5, 9.9, 198.0],
}
)
psdf = ps.from_pandas(pdf)
agg_funcs = ["max", "min", ["min", "max"]]
for aggfunc in agg_funcs:
            # Since in Koalas groupby, the order of rows might be different,
            # sort on index to ensure they have the same output
sorted_agg_psdf = psdf.groupby("kind").agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby("kind").agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_psdf, sorted_agg_pdf)
# test on multi index column case
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
for aggfunc in agg_funcs:
sorted_agg_psdf = psdf.groupby(("X", "A")).agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby(("X", "A")).agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_psdf, sorted_agg_pdf)
@unittest.skipIf(pd.__version__ < "0.25.0", "not supported before pandas 0.25.0")
def test_aggregate_relabel(self):
# this is to test named aggregation in groupby
pdf = pd.DataFrame({"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]})
psdf = ps.from_pandas(pdf)
# different agg column, same function
agg_pdf = pdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
agg_psdf = psdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
self.assert_eq(agg_pdf, agg_psdf)
# same agg column, different functions
agg_pdf = pdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
agg_psdf = psdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
self.assert_eq(agg_pdf, agg_psdf)
# test on NamedAgg
agg_pdf = (
pdf.groupby("group").agg(b_max=pd.NamedAgg(column="B", aggfunc="max")).sort_index()
)
agg_psdf = (
psdf.groupby("group").agg(b_max=ps.NamedAgg(column="B", aggfunc="max")).sort_index()
)
self.assert_eq(agg_psdf, agg_pdf)
# test on NamedAgg multi columns aggregation
agg_pdf = (
pdf.groupby("group")
.agg(
b_max=pd.NamedAgg(column="B", aggfunc="max"),
b_min=pd.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
agg_psdf = (
psdf.groupby("group")
.agg(
b_max=ps.NamedAgg(column="B", aggfunc="max"),
b_min=ps.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
self.assert_eq(agg_psdf, agg_pdf)
def test_dropna(self):
pdf = pd.DataFrame(
{"A": [None, 1, None, 1, 2], "B": [1, 2, 3, None, None], "C": [4, 5, 6, 7, None]}
)
psdf = ps.from_pandas(pdf)
# pd.DataFrame.groupby with dropna parameter is implemented since pandas 1.1.0
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index, dropna=dropna).std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).std()),
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
)
self.assert_eq(
sort(
psdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
sort(
pdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
)
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
psdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
sort(
pdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
sorted_stats_psdf = sort(
psdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
sorted_stats_pdf = sort(
pdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
self.assert_eq(sorted_stats_psdf, sorted_stats_pdf)
else:
# Testing dropna=True (pandas default behavior)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index, dropna=True)["B"].min()),
sort(pdf.groupby("A", as_index=as_index)["B"].min()),
)
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
psdf.groupby(["A", "B"], as_index=as_index, dropna=True).agg(
{"C": ["min", "std"]}
)
),
sort(pdf.groupby(["A", "B"], as_index=as_index).agg({"C": ["min", "std"]})),
almost=True,
)
# Testing dropna=False
index = pd.Index([1.0, 2.0, np.nan], name="A")
expected = pd.Series([2.0, np.nan, 1.0], index=index, name="B")
result = psdf.groupby("A", as_index=True, dropna=False)["B"].min().sort_index()
self.assert_eq(expected, result)
expected = pd.DataFrame({"A": [1.0, 2.0, np.nan], "B": [2.0, np.nan, 1.0]})
result = (
psdf.groupby("A", as_index=False, dropna=False)["B"]
.min()
.sort_values("A")
.reset_index(drop=True)
)
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[(1.0, 2.0), (1.0, None), (2.0, None), (None, 1.0), (None, 3.0)], names=["A", "B"]
)
expected = pd.DataFrame(
{
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
},
index=index,
)
result = (
psdf.groupby(["A", "B"], as_index=True, dropna=False)
.agg({"C": ["min", "std"]})
.sort_index()
)
self.assert_eq(expected, result)
expected = pd.DataFrame(
{
("A", ""): [1.0, 1.0, 2.0, np.nan, np.nan],
("B", ""): [2.0, np.nan, np.nan, 1.0, 3.0],
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
result = (
psdf.groupby(["A", "B"], as_index=False, dropna=False)
.agg({"C": ["min", "std"]})
.sort_values(["A", "B"])
.reset_index(drop=True)
)
self.assert_eq(expected, result)
def test_describe(self):
        # numeric types are supported; string types are not supported yet
datas = []
datas.append({"a": [1, 1, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
datas.append({"a": [-1, -1, -3], "b": [-4, -5, -6], "c": [-7, -8, -9]})
datas.append({"a": [0, 0, 0], "b": [0, 0, 0], "c": [0, 8, 0]})
        # it is okay if a string type column is used as a group key
datas.append({"a": ["a", "a", "c"], "b": [4, 5, 6], "c": [7, 8, 9]})
percentiles = [0.25, 0.5, 0.75]
formatted_percentiles = ["25%", "50%", "75%"]
non_percentile_stats = ["count", "mean", "std", "min", "max"]
for data in datas:
pdf = pd.DataFrame(data)
psdf = ps.from_pandas(pdf)
describe_pdf = pdf.groupby("a").describe().sort_index()
describe_psdf = psdf.groupby("a").describe().sort_index()
            # since the results of the percentile columns are slightly different from pandas,
            # we should check them separately: non-percentile columns & percentile columns
# 1. Check that non-percentile columns are equal.
agg_cols = [col.name for col in psdf.groupby("a")._agg_columns]
self.assert_eq(
describe_psdf.drop(list(product(agg_cols, formatted_percentiles))),
describe_pdf.drop(columns=formatted_percentiles, level=1),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby("a").quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_psdf.drop(list(product(agg_cols, non_percentile_stats))),
quantile_pdf.rename(columns="{:.0%}".format, level=1),
)
        # string types are not supported yet
datas = []
datas.append({"a": ["a", "a", "c"], "b": ["d", "e", "f"], "c": ["g", "h", "i"]})
datas.append({"a": ["a", "a", "c"], "b": [4, 0, 1], "c": ["g", "h", "i"]})
for data in datas:
pdf = pd.DataFrame(data)
psdf = ps.from_pandas(pdf)
self.assertRaises(
NotImplementedError, lambda: psdf.groupby("a").describe().sort_index()
)
# multi-index columns
pdf = pd.DataFrame({("x", "a"): [1, 1, 3], ("x", "b"): [4, 5, 6], ("y", "c"): [7, 8, 9]})
psdf = ps.from_pandas(pdf)
describe_pdf = pdf.groupby(("x", "a")).describe().sort_index()
describe_psdf = psdf.groupby(("x", "a")).describe().sort_index()
# 1. Check that non-percentile columns are equal.
agg_column_labels = [col._column_label for col in psdf.groupby(("x", "a"))._agg_columns]
self.assert_eq(
describe_psdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, formatted_percentiles)
]
),
describe_pdf.drop(columns=formatted_percentiles, level=2),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby(("x", "a")).quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_psdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, non_percentile_stats)
]
),
quantile_pdf.rename(columns="{:.0%}".format, level=2),
)
def test_aggregate_relabel_multiindex(self):
pdf = pd.DataFrame({"A": [0, 1, 2, 3], "B": [5, 6, 7, 8], "group": ["a", "a", "b", "b"]})
pdf.columns = pd.MultiIndex.from_tuples([("y", "A"), ("y", "B"), ("x", "group")])
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = pdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
agg_psdf = psdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
self.assert_eq(agg_pdf, agg_psdf)
# same column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_psdf = (
psdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_psdf)
# different column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [6, 8], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_psdf = (
psdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_psdf)
def test_all_any(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
"B": [True, True, True, False, False, False, None, True, None, False],
}
)
psdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index).all()),
sort(pdf.groupby("A", as_index=as_index).all()),
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index).any()),
sort(pdf.groupby("A", as_index=as_index).any()),
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index).all()).B,
sort(pdf.groupby("A", as_index=as_index).all()).B,
)
self.assert_eq(
sort(psdf.groupby("A", as_index=as_index).any()).B,
sort(pdf.groupby("A", as_index=as_index).any()).B,
)
self.assert_eq(
psdf.B.groupby(psdf.A).all().sort_index(), pdf.B.groupby(pdf.A).all().sort_index()
)
self.assert_eq(
psdf.B.groupby(psdf.A).any().sort_index(), pdf.B.groupby(pdf.A).any().sort_index()
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
psdf.columns = columns
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
self.assert_eq(
sort(psdf.groupby(("X", "A"), as_index=as_index).all()),
sort(pdf.groupby(("X", "A"), as_index=as_index).all()),
)
self.assert_eq(
sort(psdf.groupby(("X", "A"), as_index=as_index).any()),
sort(pdf.groupby(("X", "A"), as_index=as_index).any()),
)
def test_raises(self):
psdf = ps.DataFrame(
{"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
# test raises with incorrect key
self.assertRaises(ValueError, lambda: psdf.groupby([]))
self.assertRaises(KeyError, lambda: psdf.groupby("x"))
self.assertRaises(KeyError, lambda: psdf.groupby(["a", "x"]))
self.assertRaises(KeyError, lambda: psdf.groupby("a")["x"])
self.assertRaises(KeyError, lambda: psdf.groupby("a")["b", "x"])
self.assertRaises(KeyError, lambda: psdf.groupby("a")[["b", "x"]])
def test_nunique(self):
pdf = pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("a").agg({"b": "nunique"}).sort_index(),
pdf.groupby("a").agg({"b": "nunique"}).sort_index(),
)
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ps.DataFrame({"b": [2, 2]}, index=pd.Index([0, 1], name="a"))
self.assert_eq(psdf.groupby("a").nunique().sort_index(), expected)
self.assert_eq(
psdf.groupby("a").nunique(dropna=False).sort_index(),
expected,
)
else:
self.assert_eq(
psdf.groupby("a").nunique().sort_index(), pdf.groupby("a").nunique().sort_index()
)
self.assert_eq(
psdf.groupby("a").nunique(dropna=False).sort_index(),
pdf.groupby("a").nunique(dropna=False).sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].nunique().sort_index(),
pdf.groupby("a")["b"].nunique().sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
pdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
)
nunique_psdf = psdf.groupby("a", as_index=False).agg({"b": "nunique"})
nunique_pdf = pdf.groupby("a", as_index=False).agg({"b": "nunique"})
self.assert_eq(
nunique_psdf.sort_values(["a", "b"]).reset_index(drop=True),
nunique_pdf.sort_values(["a", "b"]).reset_index(drop=True),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ps.DataFrame({("y", "b"): [2, 2]}, index=pd.Index([0, 1], name=("x", "a")))
self.assert_eq(
psdf.groupby(("x", "a")).nunique().sort_index(),
expected,
)
self.assert_eq(
psdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
expected,
)
else:
self.assert_eq(
psdf.groupby(("x", "a")).nunique().sort_index(),
pdf.groupby(("x", "a")).nunique().sort_index(),
)
self.assert_eq(
psdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
pdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
)
def test_unique(self):
for pdf in [
pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
),
pd.DataFrame(
{
"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
"b": ["w", "w", "w", "x", "x", "y", "y", "z", "z", "z"],
}
),
]:
with self.subTest(pdf=pdf):
psdf = ps.from_pandas(pdf)
actual = psdf.groupby("a")["b"].unique().sort_index().to_pandas()
expect = pdf.groupby("a")["b"].unique().sort_index()
self.assert_eq(len(actual), len(expect))
for act, exp in zip(actual, expect):
self.assertTrue(sorted(act) == sorted(exp))
def test_value_counts(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]}, columns=["A", "B"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("A")["B"].value_counts().sort_index(),
pdf.groupby("A")["B"].value_counts().sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
)
self.assert_eq(
psdf.B.rename().groupby(psdf.A).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A).value_counts().sort_index(),
)
self.assert_eq(
psdf.B.groupby(psdf.A.rename()).value_counts().sort_index(),
pdf.B.groupby(pdf.A.rename()).value_counts().sort_index(),
)
self.assert_eq(
psdf.B.rename().groupby(psdf.A.rename()).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A.rename()).value_counts().sort_index(),
)
def test_size(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.groupby("A").size().sort_index(), pdf.groupby("A").size().sort_index())
self.assert_eq(
psdf.groupby("A")["B"].size().sort_index(), pdf.groupby("A")["B"].size().sort_index()
)
self.assert_eq(
psdf.groupby("A")[["B"]].size().sort_index(),
pdf.groupby("A")[["B"]].size().sort_index(),
)
self.assert_eq(
psdf.groupby(["A", "B"]).size().sort_index(),
pdf.groupby(["A", "B"]).size().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("X", "A")).size().sort_index(),
pdf.groupby(("X", "A")).size().sort_index(),
)
self.assert_eq(
psdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
pdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
)
def test_diff(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.groupby("b").diff().sort_index(), pdf.groupby("b").diff().sort_index())
self.assert_eq(
psdf.groupby(["a", "b"]).diff().sort_index(),
pdf.groupby(["a", "b"]).diff().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].diff().sort_index(),
pdf.groupby(["b"])["a"].diff().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
pdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).diff().sort_index(),
pdf.groupby(pdf.b // 5).diff().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].diff().sort_index(),
pdf.groupby(pdf.b // 5)["a"].diff().sort_index(),
)
self.assert_eq(psdf.groupby("b").diff().sum(), pdf.groupby("b").diff().sum().astype(int))
self.assert_eq(psdf.groupby(["b"])["a"].diff().sum(), pdf.groupby(["b"])["a"].diff().sum())
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).diff().sort_index(),
pdf.groupby(("x", "b")).diff().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
)
def test_rank(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.groupby("b").rank().sort_index(), pdf.groupby("b").rank().sort_index())
self.assert_eq(
psdf.groupby(["a", "b"]).rank().sort_index(),
pdf.groupby(["a", "b"]).rank().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].rank().sort_index(),
pdf.groupby(["b"])["a"].rank().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
pdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).rank().sort_index(),
pdf.groupby(pdf.b // 5).rank().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].rank().sort_index(),
pdf.groupby(pdf.b // 5)["a"].rank().sort_index(),
)
self.assert_eq(psdf.groupby("b").rank().sum(), pdf.groupby("b").rank().sum())
self.assert_eq(psdf.groupby(["b"])["a"].rank().sum(), pdf.groupby(["b"])["a"].rank().sum())
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).rank().sort_index(),
pdf.groupby(("x", "b")).rank().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
)
def test_cumcount(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
for ascending in [True, False]:
self.assert_eq(
psdf.groupby("b").cumcount(ascending=ascending).sort_index(),
pdf.groupby("b").cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
pdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby("b").cumcount(ascending=ascending).sum(),
pdf.groupby("b").cumcount(ascending=ascending).sum(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
for ascending in [True, False]:
self.assert_eq(
psdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
pdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
)
def test_cummin(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").cummin().sort_index(), pdf.groupby("b").cummin().sort_index()
)
self.assert_eq(
psdf.groupby(["a", "b"]).cummin().sort_index(),
pdf.groupby(["a", "b"]).cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].cummin().sort_index(),
pdf.groupby(["b"])["a"].cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).cummin().sort_index(),
pdf.groupby(pdf.b // 5).cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].cummin().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummin().sort_index(),
)
self.assert_eq(
psdf.groupby("b").cummin().sum().sort_index(),
pdf.groupby("b").cummin().sum().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b).cummin().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cummin().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummin().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummin().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).cummin().sort_index(),
pdf.groupby(("x", "b")).cummin().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
)
psdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"]).cummin())
psdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"])["B"].cummin())
def test_cummax(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").cummax().sort_index(), pdf.groupby("b").cummax().sort_index()
)
self.assert_eq(
psdf.groupby(["a", "b"]).cummax().sort_index(),
pdf.groupby(["a", "b"]).cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].cummax().sort_index(),
pdf.groupby(["b"])["a"].cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).cummax().sort_index(),
pdf.groupby(pdf.b // 5).cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].cummax().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummax().sort_index(),
)
self.assert_eq(
psdf.groupby("b").cummax().sum().sort_index(),
pdf.groupby("b").cummax().sum().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b).cummax().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cummax().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummax().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummax().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).cummax().sort_index(),
pdf.groupby(("x", "b")).cummax().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
)
psdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"]).cummax())
psdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"])["B"].cummax())
def test_cumsum(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").cumsum().sort_index(), pdf.groupby("b").cumsum().sort_index()
)
self.assert_eq(
psdf.groupby(["a", "b"]).cumsum().sort_index(),
pdf.groupby(["a", "b"]).cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["a"].cumsum().sort_index(),
pdf.groupby(["b"])["a"].cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).cumsum().sort_index(),
pdf.groupby(pdf.b // 5).cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].cumsum().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby("b").cumsum().sum().sort_index(),
pdf.groupby("b").cumsum().sum().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b).cumsum().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cumsum().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumsum().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumsum().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).cumsum().sort_index(),
pdf.groupby(("x", "b")).cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
)
psdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"]).cumsum())
psdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"])["B"].cumsum())
def test_cumprod(self):
pdf = pd.DataFrame(
{
"a": [1, 2, -3, 4, -5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 0, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").cumprod().sort_index(),
pdf.groupby("b").cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(["a", "b"]).cumprod().sort_index(),
pdf.groupby(["a", "b"]).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(["b"])["a"].cumprod().sort_index(),
pdf.groupby(["b"])["a"].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(psdf.b // 3).cumprod().sort_index(),
pdf.groupby(pdf.b // 3).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby(psdf.b // 3)["a"].cumprod().sort_index(),
pdf.groupby(pdf.b // 3)["a"].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby("b").cumprod().sum().sort_index(),
pdf.groupby("b").cumprod().sum().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).cumprod().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumprod().sort_index(),
check_exact=False,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).cumprod().sort_index(),
pdf.groupby(("x", "b")).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
check_exact=False,
)
psdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"]).cumprod())
psdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: psdf.groupby(["A"])["B"].cumprod())
def test_nsmallest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
pdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
)
self.assert_eq(
psdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
pdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
)
self.assert_eq(
(psdf.b * 10).groupby(psdf.a).nsmallest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
psdf.b.rename().groupby(psdf.a).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
psdf.b.groupby(psdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
self.assert_eq(
psdf.b.rename().groupby(psdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nsmallest do not support multi-index now"):
psdf.set_index(["a", "b"]).groupby(["c"])["d"].nsmallest(1)
def test_nlargest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby(["a"])["b"].nlargest(1).sort_values(),
pdf.groupby(["a"])["b"].nlargest(1).sort_values(),
)
self.assert_eq(
psdf.groupby(["a"])["b"].nlargest(2).sort_index(),
pdf.groupby(["a"])["b"].nlargest(2).sort_index(),
)
self.assert_eq(
(psdf.b * 10).groupby(psdf.a).nlargest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
psdf.b.rename().groupby(psdf.a).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
psdf.b.groupby(psdf.a.rename()).nlargest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
self.assert_eq(
psdf.b.rename().groupby(psdf.a.rename()).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nlargest do not support multi-index now"):
psdf.set_index(["a", "b"]).groupby(["c"])["d"].nlargest(1)
def test_fillna(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("A").fillna(0).sort_index(), pdf.groupby("A").fillna(0).sort_index()
)
self.assert_eq(
psdf.groupby("A")["C"].fillna(0).sort_index(),
pdf.groupby("A")["C"].fillna(0).sort_index(),
)
self.assert_eq(
psdf.groupby("A")[["C"]].fillna(0).sort_index(),
pdf.groupby("A")[["C"]].fillna(0).sort_index(),
)
self.assert_eq(
psdf.groupby("A").fillna(method="bfill").sort_index(),
pdf.groupby("A").fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
pdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby("A").fillna(method="ffill").sort_index(),
pdf.groupby("A").fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
pdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5).fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5)["C"].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5).fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5)["C"].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.C.rename().groupby(psdf.A).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A).fillna(0).sort_index(),
)
self.assert_eq(
psdf.C.groupby(psdf.A.rename()).fillna(0).sort_index(),
pdf.C.groupby(pdf.A.rename()).fillna(0).sort_index(),
)
self.assert_eq(
psdf.C.rename().groupby(psdf.A.rename()).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A.rename()).fillna(0).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("X", "A")).fillna(0).sort_index(),
pdf.groupby(("X", "A")).fillna(0).sort_index(),
)
self.assert_eq(
psdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
)
def test_ffill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
psdf.groupby("A").ffill().sort_index(),
pdf.groupby("A").ffill().sort_index().drop("A", 1),
)
self.assert_eq(
psdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
psdf.groupby("A").ffill().sort_index(), pdf.groupby("A").ffill().sort_index()
)
self.assert_eq(
psdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].ffill().sort_index(), pdf.groupby("A")["B"].ffill().sort_index()
)
self.assert_eq(
psdf.groupby("A")["B"].ffill()[idx[6]], pdf.groupby("A")["B"].ffill()[idx[6]]
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
psdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
psdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index(),
)
def test_bfill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
psdf.groupby("A").bfill().sort_index(),
pdf.groupby("A").bfill().sort_index().drop("A", 1),
)
self.assert_eq(
psdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
psdf.groupby("A").bfill().sort_index(), pdf.groupby("A").bfill().sort_index()
)
self.assert_eq(
psdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].bfill().sort_index(),
pdf.groupby("A")["B"].bfill().sort_index(),
)
self.assert_eq(
psdf.groupby("A")["B"].bfill()[idx[6]], pdf.groupby("A")["B"].bfill()[idx[6]]
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
psdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
psdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index(),
)
@unittest.skipIf(pd.__version__ < "0.24.0", "not supported before pandas 0.24.0")
def test_shift(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 2, 2, 3, 3] * 3,
"b": [1, 1, 2, 2, 3, 4] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("a").shift().sort_index(), pdf.groupby("a").shift().sort_index()
)
# TODO: seems like a pandas' bug when fill_value is not None?
# self.assert_eq(psdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index(),
# pdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index())
self.assert_eq(
psdf.groupby(["b"])["a"].shift().sort_index(),
pdf.groupby(["b"])["a"].shift().sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"])["c"].shift().sort_index(),
pdf.groupby(["a", "b"])["c"].shift().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).shift().sort_index(),
pdf.groupby(pdf.b // 5).shift().sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].shift().sort_index(),
pdf.groupby(pdf.b // 5)["a"].shift().sort_index(),
)
# TODO: known pandas' bug when fill_value is not None pandas>=1.0.0
# https://github.com/pandas-dev/pandas/issues/31971#issue-565171762
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assert_eq(
psdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
pdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).shift().sort_index(),
pdf.a.rename().groupby(pdf.b).shift().sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).shift().sort_index(),
pdf.a.groupby(pdf.b.rename()).shift().sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).shift().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).shift().sort_index(),
)
self.assert_eq(psdf.groupby("a").shift().sum(), pdf.groupby("a").shift().sum().astype(int))
self.assert_eq(
psdf.a.rename().groupby(psdf.b).shift().sum(),
pdf.a.rename().groupby(pdf.b).shift().sum(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "a")).shift().sort_index(),
pdf.groupby(("x", "a")).shift().sort_index(),
)
# TODO: seems like a pandas' bug when fill_value is not None?
# self.assert_eq(psdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index(),
# pdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index())
def test_apply(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby("b").apply(len).sort_index(),
pdf.groupby("b").apply(len).sort_index(),
)
self.assert_eq(
psdf.groupby("b")["a"]
.apply(lambda x, y, z: x + x.min() + y * z, 10, z=20)
.sort_index(),
pdf.groupby("b")["a"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),
)
self.assert_eq(
psdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"])
.apply(lambda x, y, z: x + x.min() + y + z, 1, z=2)
.sort_index(),
pdf.groupby(["a", "b"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
pdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["c"].apply(len).sort_index(),
pdf.groupby(["b"])["c"].apply(len).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)[["a"]].apply(len).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(len).sort_index(),
almost=True,
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
psdf.groupby("b").apply(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
pdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(("x", "b")).apply(len).sort_index(),
pdf.groupby(("x", "b")).apply(len).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
)
def test_apply_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply()
def test_apply_negative(self):
def func(_) -> ps.Series[int]:
return pd.Series([1])
with self.assertRaisesRegex(TypeError, "Series as a return type hint at frame groupby"):
ps.range(10).groupby("id").apply(func)
def test_apply_with_new_dataframe(self):
pdf = pd.DataFrame(
{"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5], "car_id": ["A", "A", "A", "B", "B"]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
psdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
# dataframe with 1000+ records
pdf = pd.DataFrame(
{
"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5] * 300,
"car_id": ["A", "A", "A", "B", "B"] * 300,
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
psdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
def test_apply_with_new_dataframe_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply_with_new_dataframe()
def test_apply_key_handling(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
with ps.option_context("compute.shortcut_limit", 1):
self.assert_eq(
psdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
def test_apply_with_side_effect(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
psdf = ps.from_pandas(pdf)
acc = ps.utils.default_session().sparkContext.accumulator(0)
def sum_with_acc_frame(x) -> ps.DataFrame[np.float64, np.float64]:
nonlocal acc
acc += 1
return np.sum(x)
actual = psdf.groupby("d").apply(sum_with_acc_frame).sort_index()
actual.columns = ["d", "v"]
self.assert_eq(actual, pdf.groupby("d").apply(sum).sort_index().reset_index(drop=True))
self.assert_eq(acc.value, 2)
def sum_with_acc_series(x) -> np.float64:
nonlocal acc
acc += 1
return np.sum(x)
self.assert_eq(
psdf.groupby("d")["v"].apply(sum_with_acc_series).sort_index(),
pdf.groupby("d")["v"].apply(sum).sort_index().reset_index(drop=True),
)
self.assert_eq(acc.value, 4)
def test_transform(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
)
def test_transform_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_transform()
def test_filter(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(psdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.a.groupby(psdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.a.rename().groupby(psdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
psdf.groupby("b").filter(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
pdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
pdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
)
def test_idxmax(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmax().sort_index(), psdf.groupby(["a"]).idxmax().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
psdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmax().sort_index(),
psdf.groupby(["a"])["b"].idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmax().sort_index(),
psdf.b.rename().groupby(psdf.a).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmax().sort_index(),
psdf.b.groupby(psdf.a.rename()).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmax().sort_index(),
psdf.b.rename().groupby(psdf.a.rename()).idxmax().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmax only support one-level index now"):
psdf.set_index(["a", "b"]).groupby(["c"]).idxmax()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmax().sort_index(),
psdf.groupby(("x", "a")).idxmax().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
psdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
)
def test_idxmin(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmin().sort_index(), psdf.groupby(["a"]).idxmin().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
psdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmin().sort_index(),
psdf.groupby(["a"])["b"].idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmin().sort_index(),
psdf.b.rename().groupby(psdf.a).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmin().sort_index(),
psdf.b.groupby(psdf.a.rename()).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmin().sort_index(),
psdf.b.rename().groupby(psdf.a.rename()).idxmin().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmin only support one-level index now"):
psdf.set_index(["a", "b"]).groupby(["c"]).idxmin()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmin().sort_index(),
psdf.groupby(("x", "a")).idxmin().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
psdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
)
def test_head(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby("a").head(2).sort_index(), psdf.groupby("a").head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), psdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), psdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), psdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(),
psdf.groupby("a")["b"].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
psdf.groupby("a")["b"].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(2).sort_index(),
psdf.groupby("a")[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(-2).sort_index(),
psdf.groupby("a")[["b"]].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(100000).sort_index(),
psdf.groupby("a")[["b"]].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2).head(2).sort_index(),
psdf.groupby(psdf.a // 2).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)["b"].head(2).sort_index(),
psdf.groupby(psdf.a // 2)["b"].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)[["b"]].head(2).sort_index(),
psdf.groupby(psdf.a // 2)[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).head(2).sort_index(),
psdf.b.rename().groupby(psdf.a).head(2).sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).head(2).sort_index(),
psdf.b.groupby(psdf.a.rename()).head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).head(2).sort_index(),
psdf.b.rename().groupby(psdf.a.rename()).head(2).sort_index(),
)
# multi-index
midx = pd.MultiIndex(
[["x", "y"], ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]],
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
)
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],
},
columns=["a", "b", "c"],
index=midx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby("a").head(2).sort_index(), psdf.groupby("a").head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), psdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), psdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), psdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(),
psdf.groupby("a")["b"].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
psdf.groupby("a")["b"].head(100000).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).head(2).sort_index(),
psdf.groupby(("x", "a")).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(-2).sort_index(),
psdf.groupby(("x", "a")).head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(100000).sort_index(),
psdf.groupby(("x", "a")).head(100000).sort_index(),
)
def test_missing(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# DataFrameGroupBy functions
missing_functions = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a"), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(psdf.groupby("a"), name)()
# SeriesGroupBy functions
missing_functions = inspect.getmembers(MissingPandasLikeSeriesGroupBy, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(psdf.a.groupby(psdf.a), name)()
# DataFrameGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a"), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(psdf.groupby("a"), name)
# SeriesGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeSeriesGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(psdf.a.groupby(psdf.a), name)
@staticmethod
def test_is_multi_agg_with_relabel():
assert is_multi_agg_with_relabel(a="max") is False
assert is_multi_agg_with_relabel(a_min=("a", "max"), a_max=("a", "min")) is True
def test_get_group(self):
pdf = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby("class").get_group("bird"),
pdf.groupby("class").get_group("bird"),
)
self.assert_eq(
psdf.groupby("class")["name"].get_group("mammal"),
pdf.groupby("class")["name"].get_group("mammal"),
)
self.assert_eq(
psdf.groupby("class")[["name"]].get_group("mammal"),
pdf.groupby("class")[["name"]].get_group("mammal"),
)
self.assert_eq(
psdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
pdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
)
self.assert_eq(
psdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
)
self.assert_eq(
psdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
)
self.assert_eq(
(psdf.max_speed + 1).groupby(psdf["class"]).get_group("mammal"),
(pdf.max_speed + 1).groupby(pdf["class"]).get_group("mammal"),
)
self.assert_eq(
psdf.groupby("max_speed").get_group(80.5),
pdf.groupby("max_speed").get_group(80.5),
)
self.assertRaises(KeyError, lambda: psdf.groupby("class").get_group("fish"))
self.assertRaises(TypeError, lambda: psdf.groupby("class").get_group(["bird", "mammal"]))
self.assertRaises(KeyError, lambda: psdf.groupby("class")["name"].get_group("fish"))
self.assertRaises(
TypeError, lambda: psdf.groupby("class")["name"].get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: psdf.groupby(["class", "name"]).get_group(("lion", "mammal"))
)
self.assertRaises(ValueError, lambda: psdf.groupby(["class", "name"]).get_group(("lion",)))
self.assertRaises(
ValueError, lambda: psdf.groupby(["class", "name"]).get_group(("mammal",))
)
self.assertRaises(ValueError, lambda: psdf.groupby(["class", "name"]).get_group("mammal"))
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("A", "name"), ("B", "class"), ("C", "max_speed")])
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.groupby(("B", "class")).get_group("bird"),
pdf.groupby(("B", "class")).get_group("bird"),
)
self.assert_eq(
psdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
pdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
)
self.assert_eq(
psdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
pdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
)
self.assert_eq(
psdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
pdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
)
self.assert_eq(
(psdf[("C", "max_speed")] + 1).groupby(psdf[("B", "class")]).get_group("mammal"),
(pdf[("C", "max_speed")] + 1).groupby(pdf[("B", "class")]).get_group("mammal"),
)
self.assert_eq(
psdf.groupby(("C", "max_speed")).get_group(80.5),
pdf.groupby(("C", "max_speed")).get_group(80.5),
)
self.assertRaises(KeyError, lambda: psdf.groupby(("B", "class")).get_group("fish"))
self.assertRaises(
TypeError, lambda: psdf.groupby(("B", "class")).get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: psdf.groupby(("B", "class"))[("A", "name")].get_group("fish")
)
self.assertRaises(
KeyError,
lambda: psdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion", "mammal")),
)
self.assertRaises(
ValueError,
lambda: psdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion",)),
)
self.assertRaises(
ValueError, lambda: psdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal",))
)
self.assertRaises(
ValueError, lambda: psdf.groupby([("B", "class"), ("A", "name")]).get_group("mammal")
)
def test_median(self):
psdf = ps.DataFrame(
{
"a": [1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0],
"b": [2.0, 3.0, 1.0, 4.0, 6.0, 9.0, 8.0, 10.0, 7.0, 5.0],
"c": [3.0, 5.0, 2.0, 5.0, 1.0, 2.0, 6.0, 4.0, 3.0, 6.0],
},
columns=["a", "b", "c"],
index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6],
)
# DataFrame
expected_result = ps.DataFrame(
{"b": [2.0, 8.0, 7.0], "c": [3.0, 2.0, 4.0]}, index=pd.Index([1.0, 2.0, 3.0], name="a")
)
self.assert_eq(expected_result, psdf.groupby("a").median().sort_index())
# Series
expected_result = ps.Series(
[2.0, 8.0, 7.0], name="b", index=pd.Index([1.0, 2.0, 3.0], name="a")
)
self.assert_eq(expected_result, psdf.groupby("a")["b"].median().sort_index())
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
psdf.groupby("a").median(accuracy="a")
def test_tail(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby("a").tail(2).sort_index(), psdf.groupby("a").tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(-2).sort_index(), psdf.groupby("a").tail(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(100000).sort_index(), psdf.groupby("a").tail(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(2).sort_index(), psdf.groupby("a")["b"].tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(-2).sort_index(),
psdf.groupby("a")["b"].tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")["b"].tail(100000).sort_index(),
psdf.groupby("a")["b"].tail(100000).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(2).sort_index(),
psdf.groupby("a")[["b"]].tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(-2).sort_index(),
psdf.groupby("a")[["b"]].tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(100000).sort_index(),
psdf.groupby("a")[["b"]].tail(100000).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2).tail(2).sort_index(),
psdf.groupby(psdf.a // 2).tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)["b"].tail(2).sort_index(),
psdf.groupby(psdf.a // 2)["b"].tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)[["b"]].tail(2).sort_index(),
psdf.groupby(psdf.a // 2)[["b"]].tail(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).tail(2).sort_index(),
psdf.b.rename().groupby(psdf.a).tail(2).sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).tail(2).sort_index(),
psdf.b.groupby(psdf.a.rename()).tail(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).tail(2).sort_index(),
psdf.b.rename().groupby(psdf.a.rename()).tail(2).sort_index(),
)
# multi-index
midx = pd.MultiIndex(
[["x", "y"], ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]],
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
)
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],
},
columns=["a", "b", "c"],
index=midx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby("a").tail(2).sort_index(), psdf.groupby("a").tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(-2).sort_index(), psdf.groupby("a").tail(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(100000).sort_index(), psdf.groupby("a").tail(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(2).sort_index(), psdf.groupby("a")["b"].tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(-2).sort_index(),
psdf.groupby("a")["b"].tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")["b"].tail(100000).sort_index(),
psdf.groupby("a")["b"].tail(100000).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).tail(2).sort_index(),
psdf.groupby(("x", "a")).tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).tail(-2).sort_index(),
psdf.groupby(("x", "a")).tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).tail(100000).sort_index(),
psdf.groupby(("x", "a")).tail(100000).sort_index(),
)
def test_ddof(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
psdf = ps.from_pandas(pdf)
for ddof in (0, 1):
# std
self.assert_eq(
pdf.groupby("a").std(ddof=ddof).sort_index(),
psdf.groupby("a").std(ddof=ddof).sort_index(),
check_exact=False,
)
self.assert_eq(
pdf.groupby("a")["b"].std(ddof=ddof).sort_index(),
psdf.groupby("a")["b"].std(ddof=ddof).sort_index(),
check_exact=False,
)
# var
self.assert_eq(
pdf.groupby("a").var(ddof=ddof).sort_index(),
psdf.groupby("a").var(ddof=ddof).sort_index(),
check_exact=False,
)
self.assert_eq(
pdf.groupby("a")["b"].var(ddof=ddof).sort_index(),
psdf.groupby("a")["b"].var(ddof=ddof).sort_index(),
check_exact=False,
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_groupby import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
aleju/keras
|
examples/kaggle_otto_nn.py
|
8
|
3737
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible with Python 2.7-3.4
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, larger layers
- with more layers, fewer layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
np.random.seed(1337) # for reproducibility
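# Assumed Kaggle CSV layout, as encoded by the slicing in load_data below:
# train.csv carries an id in the first column and the target class in the last one;
# test.csv carries the id first with only feature columns after it.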
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
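# The docstring above suggests trying SGD with momentum and decay instead of Adam.
# A sketch of that variation (hyper-parameter values are illustrative, not taken
# from the original example):
# from keras.optimizers import SGD
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer=sgd)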
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
|
mit
|
ZhuiFengChaseWind/Self-Driving_Car_Capstone
|
ros/src/tl_detector/ipynb/train_model.py
|
1
|
3092
|
# coding: utf-8
# In[38]:
import matplotlib.pyplot as plt
import tensorflow as tf
import glob
from scipy.misc import imread
from scipy.misc import imresize
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Convolution2D, MaxPooling2D, Conv2D, MaxPool2D, Lambda
from keras.layers import BatchNormalization, LeakyReLU
from keras.utils import np_utils
import numpy as np
# In[47]:
# load data
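# Assumption: /home/michael/tl_training contains one sub-directory per integer label
# (0, 1 and 2), each holding the JPEG images for that traffic-light class.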
file_names_dict = dict()
for i in [0, 1, 2]:
image_files = glob.glob("/home/michael/tl_training/{}/*.jpg".format(i))
file_names_dict[i] = image_files
min_length = 9999999
# In[48]:
data_dict = dict()
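# Balance the classes: find the size of the smallest class, then truncate every
# class's file list to that length before reading the images.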
for key in file_names_dict:
length = len(file_names_dict.get(key))
if length < min_length:
min_length = length
for key in file_names_dict:
print(len(file_names_dict.get(key)))
fnames = file_names_dict.get(key)[0:min_length]
images = [imread(x) for x in fnames]
data_dict[key] = images
# In[49]:
X = []
Y = []
for key in data_dict:
x = np.array(data_dict.get(key))
y = np.ones(shape=x.shape[0]) * key
X.append(x)
Y.append(y)
# In[50]:
X_train = np.vstack((X[0], X[1], X[2], X[3]))
Y_train = np.hstack((Y[0], Y[1], Y[2], Y[3]))
# In[51]:
# In[52]:
print(X_train.shape)
print(Y_train.shape)
# In[53]:
del X
del Y
del data_dict
del file_names_dict
# In[54]:
# In[77]:
model = Sequential([
Lambda(lambda x: x / 255 - 0.5, input_shape=(300, 400, 3)),
Conv2D(8, kernel_size=(5, 5), strides=(2,2)),
LeakyReLU(alpha=0.1),
BatchNormalization(),
MaxPool2D(pool_size=(2,2), strides=(2,2)),
Conv2D(16, kernel_size=(3, 3), strides=(1,1)),
LeakyReLU(alpha=0.1),
BatchNormalization(),
MaxPool2D(pool_size=(2,2), strides=(2,2)),
Conv2D(32, kernel_size=(3, 3), strides=(1, 1)),
LeakyReLU(alpha=0.1),
BatchNormalization(),
Flatten(),
Dense(55),
Dense(4, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.output_shape)
# In[56]:
# training
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
rotation_range = 10,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True
)
datagen.fit(X_train)
# In[57]:
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder()
Y_train = Y_train.reshape(-1, 1)
Y_train = enc.fit_transform(Y_train).toarray()
# In[68]:
for i in range(100):
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=64), steps_per_epoch=X_train.shape[0] // 64, epochs=10)
model.save('../light_classification/models/whole_image_model_gpu2.h5')
# In[69]:
# In[70]:
# separate the model architecture and the weights
yaml_string = model.to_yaml()
# In[71]:
with open("../light_classification/models/model.yaml", 'wt') as f:
f.write(yaml_string)
model.save_weights("../light_classification/models/model_weights.h5")
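# Illustrative sketch (not part of the original notebook) of how the saved
# architecture and weights could be reloaded for inference later:
#
#   from keras.models import model_from_yaml
#   with open("../light_classification/models/model.yaml") as f:
#       model = model_from_yaml(f.read())
#   model.load_weights("../light_classification/models/model_weights.h5")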
# In[ ]:
|
mit
|
xwolf12/scikit-learn
|
examples/ensemble/plot_adaboost_twoclass.py
|
347
|
3268
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
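# As the module docstring notes, a purer subset of class B could be drawn by
# keeping only samples whose decision score exceeds a threshold; the cutoff of
# 0.5 below is an illustrative choice, not part of the original example.
high_confidence_B = X[twoclass_output > 0.5]
print("%d samples have a decision score above 0.5" % len(high_confidence_B))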
|
bsd-3-clause
|
sarahgrogan/scikit-learn
|
sklearn/metrics/cluster/unsupervised.py
|
230
|
8281
|
""" Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
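# Illustrative usage sketch (not part of the original module): given a feature
# array X and labels from a fitted clustering such as KMeans, the mean
# coefficient could be computed as
#   from sklearn.cluster import KMeans
#   labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#   score = silhouette_score(X, labels, metric='euclidean')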
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
|
bsd-3-clause
|
anguswilliams91/OnTheRun
|
code/query.py
|
1
|
3994
|
import sql_utils as sql
import pandas as pd
import numpy as np
import gus_utils as gu
def main_sequence_query():
"""
Retrieve MSTO stars from SDSS DR9 with some quality cuts. Saves the data
as a Pandas DataFrame
"""
getstr = "SELECT spa.ra, spa.dec, spa.dered_u, spa.psfmagerr_u,spa.dered_g,\
spa.psfmagerr_g,spa.dered_r, spa.psfmagerr_r,spa.dered_i, spa.psfmagerr_i,\
spa.dered_z, spa.psfmagerr_z,spp.elodiervfinal,spp.elodiervfinalerr,\
spp.fehadop,spp.fehadopunc,spp.loggadop,spp.loggadopunc,spp.teffadop,spp.teffadopunc, spa.specobjid \
\
FROM sdssdr9.specphotoall AS spa,\
sdssdr9.sppparams AS spp \
\
WHERE spp.specobjid=spa.specobjid \
AND spp.scienceprimary=1 \
AND spa.class='STAR' \
AND spa.TYPE=6\
AND spa.extinction_r<0.3\
AND spa.dered_g-spa.dered_r BETWEEN 0.2 AND 0.35 \
AND spa.dered_r BETWEEN 14.5 AND 20. \
AND spp.fehadop BETWEEN -4. AND -0.9 \
AND spp.loggadop between 3.5 and 4.\
AND spp.teffadop BETWEEN 4500. AND 8000.\
AND spa.psfmagerr_g BETWEEN 0. AND 0.04 \
AND spa.psfmagerr_r BETWEEN 0. AND 0.04 \
AND spa.psfmagerr_i BETWEEN 0. AND 0.04 \
AND spp.fehadopunc < 0.1 \
AND (spp.zwarning=0 OR spp.zwarning=16) \
AND spp.snr > 20."
res = sql.get(getstr)
data = pd.DataFrame(np.array(res).T[:,:-1],columns=['ra','dec','u','u_err','g','g_err','r','r_err','i','i_err',\
'z','z_err','vhel','vhel_err','feh','feh_err','logg',\
'logg_err','teff','teff_err'])
data.loc[:,'specobjid'] = pd.Series(res[-1].astype(np.int64), index=data.index)
l,b = gu.radec2galactic(data.ra.values,data.dec.values)
vgsr = gu.helio2galactic(data.vhel.values,l,b)
data.loc[:,'l'] = pd.Series(l,index=data.index)
data.loc[:,'b'] = pd.Series(b,index=data.index)
data.loc[:,'vgsr'] = pd.Series(vgsr,index=data.index)
s = gu.Ivesic_estimator(data.g.values,data.r.values,data.i.values,data.feh.values)
data = data[(np.abs(data.b)>np.radians(20.))&(data.feh<-0.9)&(s<15.)].reset_index(drop=True)
data.to_csv("/data/aamw3/SDSS/main_sequence.csv")
return None
def bhb_query():
"""
Retrieve BHB stars from SDSS DR9 with some quality cuts. Saves the data
as a Pandas DataFrame
"""
getstr = "SELECT spa.ra,spa.dec,spa.psfmag_g-spa.extinction_g,spa.psfmag_r-spa.extinction_r,spa.psfmagerr_g,spa.psfmagerr_r, \
spp.loggadop,spp.fehadop,spp.teffadop,spp.elodiervfinal,spp.elodiervfinalerr,spa.specobjid \
\
FROM sdssdr9.specphotoall AS spa, \
sdssdr9.sppparams AS spp \
\
WHERE spp.specobjid=spa.specobjid \
AND spp.scienceprimary=1 \
AND spa.class='STAR'\
AND spa.psfmag_g-spa.extinction_g-spa.psfmag_r \
+spa.extinction_r BETWEEN -0.25 AND 0. \
AND spa.psfmag_u-spa.extinction_u-spa.psfmag_g \
+spa.extinction_g BETWEEN 0.9 AND 1.4 \
AND spp.fehadop BETWEEN -2. AND -1. \
AND spp.loggadop BETWEEN 3. AND 3.5 \
AND spp.teffadop BETWEEN 8300. AND 9300. \
AND (spp.zwarning=0 OR spp.zwarning=16) \
AND spp.snr>20."
res = sql.get(getstr)
data = pd.DataFrame(np.array(res).T[:,:-1], columns=['ra','dec','g','r','g_err','r_err','logg','feh','teff','vhel',\
'vhel_err'])
data.loc[:,'specobjid'] = pd.Series(res[-1].astype(np.int64), index=data.index)
l,b = gu.radec2galactic(data.ra.values,data.dec.values)
vgsr = gu.helio2galactic(data.vhel.values,l,b)
data.loc[:,'l'] = pd.Series(l,index=data.index)
data.loc[:,'b'] = pd.Series(b,index=data.index)
data.loc[:,'vgsr'] = pd.Series(vgsr,index=data.index)
data = data[(np.abs(data.b)>np.radians(20.))].reset_index(drop=True)
data.to_csv("/data/aamw3/SDSS/bhb.csv")
return None
"""
To make the K giant sample, I downloaded the data from Xue et al. and then cut so that [Fe/H]<-0.9, |b| > 20 degrees and rgc<50kpc.
"""
|
mit
|
Karel-van-de-Plassche/bokeh
|
examples/models/file/anscombe.py
|
12
|
3015
|
from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import gridplot
from bokeh.models.glyphs import Circle, Line
from bokeh.models import ColumnDataSource, Grid, LinearAxis, Plot, Range1d
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(x_range=xdr, y_range=ydr, plot_width=400, plot_height=400,
border_fill_color='white', background_fill_color='#e9e0db')
plot.title.text = title
xaxis = LinearAxis(axis_line_color=None)
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_line_color=None)
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
line = Line(x='x', y='y', line_color="#666699", line_width=2)
plot.add_glyph(lines_source, line)
circle = Circle(
x=xname, y=yname, size=12,
fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5
)
plot.add_glyph(circles_source, circle)
return plot
# Build the four Anscombe quartet panels
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = gridplot([[I, II], [III, IV]], toolbar_location=None)
doc = Document()
doc.add_root(grid)
if __name__ == "__main__":
doc.validate()
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
|
bsd-3-clause
|
ngoix/OCRF
|
sklearn/feature_extraction/tests/test_feature_hasher.py
|
258
|
2861
|
from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
|
bsd-3-clause
|
ContinuumIO/dask
|
dask/array/core.py
|
1
|
153024
|
import math
import operator
import os
import pickle
import re
import sys
import traceback
import uuid
import warnings
from bisect import bisect
from collections.abc import Iterable, Iterator, Mapping
from functools import partial, wraps, reduce
from itertools import product, zip_longest
from numbers import Number, Integral
from operator import add, getitem, mul
from threading import Lock
from tlz import partition, concat, first, groupby, accumulate, frequencies
from tlz.curried import pluck
import numpy as np
from . import chunk
from .. import config, compute
from ..base import (
DaskMethodsMixin,
tokenize,
dont_optimize,
compute_as_if_collection,
persist,
is_dask_collection,
)
from ..blockwise import broadcast_dimensions
from ..context import globalmethod
from ..utils import (
ndeepmap,
ignoring,
concrete,
derived_from,
is_integer,
IndexCallable,
funcname,
SerializableLock,
Dispatch,
factors,
parse_bytes,
has_keyword,
M,
ndimlist,
format_bytes,
typename,
)
from ..core import quote
from ..delayed import delayed, Delayed
from .. import threaded, core
from ..sizeof import sizeof
from ..highlevelgraph import HighLevelGraph
from .numpy_compat import _Recurser, _make_sliced_dtype
from .slicing import slice_array, replace_ellipsis, cached_cumsum
from .blockwise import blockwise
config.update_defaults({"array": {"chunk-size": "128MiB", "rechunk-threshold": 4}})
concatenate_lookup = Dispatch("concatenate")
tensordot_lookup = Dispatch("tensordot")
einsum_lookup = Dispatch("einsum")
concatenate_lookup.register((object, np.ndarray), np.concatenate)
tensordot_lookup.register((object, np.ndarray), np.tensordot)
einsum_lookup.register((object, np.ndarray), np.einsum)
unknown_chunk_message = (
"\n\n"
"A possible solution: "
"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\n"
"Summary: to compute chunks sizes, use\n\n"
" x.compute_chunk_sizes() # for Dask Array `x`\n"
" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`"
)
class PerformanceWarning(Warning):
""" A warning given when bad chunking may cause poor performance """
def getter(a, b, asarray=True, lock=None):
if isinstance(b, tuple) and any(x is None for x in b):
b2 = tuple(x for x in b if x is not None)
b3 = tuple(
None if x is None else slice(None, None)
for x in b
if not isinstance(x, Integral)
)
return getter(a, b2, asarray=asarray, lock=lock)[b3]
if lock:
lock.acquire()
try:
c = a[b]
if asarray:
c = np.asarray(c)
finally:
if lock:
lock.release()
return c
def getter_nofancy(a, b, asarray=True, lock=None):
""" A simple wrapper around ``getter``.
Used to indicate to the optimization passes that the backend doesn't
support fancy indexing.
"""
return getter(a, b, asarray=asarray, lock=lock)
def getter_inline(a, b, asarray=True, lock=None):
""" A getter function that optimizations feel comfortable inlining
Slicing operations with this function may be inlined into a graph, such as
in the following rewrite
**Before**
>>> a = x[:10] # doctest: +SKIP
>>> b = a + 1 # doctest: +SKIP
>>> c = a * 2 # doctest: +SKIP
**After**
>>> b = x[:10] + 1 # doctest: +SKIP
>>> c = x[:10] * 2 # doctest: +SKIP
This inlining can be relevant to operations when running off of disk.
"""
return getter(a, b, asarray=asarray, lock=lock)
from .optimization import optimize, fuse_slice
# __array_function__ dict for mapping aliases and mismatching names
_HANDLED_FUNCTIONS = {}
def implements(*numpy_functions):
"""Register an __array_function__ implementation for dask.array.Array
Register that a function implements the API of a NumPy function (or several
NumPy functions in case of aliases) which is handled with
``__array_function__``.
Parameters
----------
\\*numpy_functions : callables
One or more NumPy functions that are handled by ``__array_function__``
and will be mapped by `implements` to a `dask.array` function.
"""
def decorator(dask_func):
for numpy_function in numpy_functions:
_HANDLED_FUNCTIONS[numpy_function] = dask_func
return dask_func
return decorator
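# Illustrative usage sketch (hypothetical function, not registered by this
# module): a dask-aware peak-to-peak could be exposed to NumPy's
# __array_function__ dispatch with
#   @implements(np.ptp)
#   def ptp(a, axis=None):
#       return a.max(axis=axis) - a.min(axis=axis)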
def slices_from_chunks(chunks):
""" Translate chunks tuple to a set of slices in product order
>>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE
[(slice(0, 2, None), slice(0, 3, None)),
(slice(0, 2, None), slice(3, 6, None)),
(slice(0, 2, None), slice(6, 9, None)),
(slice(2, 4, None), slice(0, 3, None)),
(slice(2, 4, None), slice(3, 6, None)),
(slice(2, 4, None), slice(6, 9, None))]
"""
cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]
slices = [
[slice(s, s + dim) for s, dim in zip(starts, shapes)]
for starts, shapes in zip(cumdims, chunks)
]
return list(product(*slices))
def getem(
arr,
chunks,
getitem=getter,
shape=None,
out_name=None,
lock=False,
asarray=True,
dtype=None,
):
""" Dask getting various chunks from an array-like
>>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP
{('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}
>>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP
{('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}
"""
out_name = out_name or arr
chunks = normalize_chunks(chunks, shape, dtype=dtype)
keys = product([out_name], *(range(len(bds)) for bds in chunks))
slices = slices_from_chunks(chunks)
if (
has_keyword(getitem, "asarray")
and has_keyword(getitem, "lock")
and (not asarray or lock)
):
values = [(getitem, arr, x, asarray, lock) for x in slices]
else:
# Common case, drop extra parameters
values = [(getitem, arr, x) for x in slices]
return dict(zip(keys, values))
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
""" Dot product of many aligned chunks
>>> x = np.array([[1, 2], [1, 2]])
>>> y = np.array([[10, 20], [10, 20]])
>>> dotmany([x, x, x], [y, y, y])
array([[ 90, 180],
[ 90, 180]])
Optionally pass in functions to apply to the left and right chunks
>>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
array([[150, 150],
[150, 150]])
"""
if leftfunc:
A = map(leftfunc, A)
if rightfunc:
B = map(rightfunc, B)
return sum(map(partial(np.dot, **kwargs), A, B))
def _concatenate2(arrays, axes=[]):
""" Recursively Concatenate nested lists of arrays along axes
Each entry in axes corresponds to each level of the nested list. The
length of axes should correspond to the level of nesting of arrays.
If axes is an empty list or tuple, return arrays, or arrays[0] if
arrays is a list.
>>> x = np.array([[1, 2], [3, 4]])
>>> _concatenate2([x, x], axes=[0])
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> _concatenate2([x, x], axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
Supports Iterators
>>> _concatenate2(iter([x, x]), axes=[1])
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
Special Case
>>> _concatenate2([x, x], axes=())
array([[1, 2],
[3, 4]])
"""
if axes == ():
if isinstance(arrays, list):
return arrays[0]
else:
return arrays
if isinstance(arrays, Iterator):
arrays = list(arrays)
if not isinstance(arrays, (list, tuple)):
return arrays
if len(axes) > 1:
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
concatenate = concatenate_lookup.dispatch(
type(max(arrays, key=lambda x: getattr(x, "__array_priority__", 0)))
)
return concatenate(arrays, axis=axes[0])
def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype="dtype", nout=None):
"""
Tries to infer output dtype of ``func`` for a small set of input arguments.
Parameters
----------
func: Callable
Function for which output dtype is to be determined
args: List of array like
Arguments to the function, which would usually be used. Only attributes
``ndim`` and ``dtype`` are used.
kwargs: dict
Additional ``kwargs`` to the ``func``
funcname: String
Name of calling function to improve potential error messages
suggest_dtype: None/False or String
If not ``None`` adds suggestion to potential error message to specify a dtype
via the specified kwarg. Defaults to ``'dtype'``.
nout: None or Int
``None`` if function returns single output, integer if many.
        Defaults to ``None``.
Returns
-------
: dtype or List of dtype
One or many dtypes (depending on ``nout``)
"""
args = [
np.ones((1,) * x.ndim, dtype=x.dtype) if isinstance(x, Array) else x
for x in args
]
try:
with np.errstate(all="ignore"):
o = func(*args, **kwargs)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = "".join(traceback.format_tb(exc_traceback))
suggest = (
(
"Please specify the dtype explicitly using the "
"`{dtype}` kwarg.\n\n".format(dtype=suggest_dtype)
)
if suggest_dtype
else ""
)
msg = (
"`dtype` inference failed in `{0}`.\n\n"
"{1}"
"Original error is below:\n"
"------------------------\n"
"{2}\n\n"
"Traceback:\n"
"---------\n"
"{3}"
).format(funcname, suggest, repr(e), tb)
else:
msg = None
if msg is not None:
raise ValueError(msg)
return o.dtype if nout is None else tuple(e.dtype for e in o)
def normalize_arg(x):
""" Normalize user provided arguments to blockwise or map_blocks
We do a few things:
1. If they are string literals that might collide with blockwise_token then we
quote them
    2. If they are large (as defined by sizeof) then we put them into the
graph on their own by using dask.delayed
"""
if is_dask_collection(x):
return x
elif isinstance(x, str) and re.match(r"_\d+", x):
return delayed(x)
elif isinstance(x, list) and len(x) >= 10:
return delayed(x)
elif sizeof(x) > 1e6:
return delayed(x)
else:
return x
def _pass_extra_kwargs(func, keys, *args, **kwargs):
""" Helper for :func:`map_blocks` to pass `block_info` or `block_id`.
For each element of `keys`, a corresponding element of args is changed
    to a keyword argument with that key, before all arguments are passed on
to `func`.
"""
kwargs.update(zip(keys, args))
return func(*args[len(keys) :], **kwargs)
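# Illustrative sketch (hypothetical call, not from the original module):
#   _pass_extra_kwargs(np.add, ["where"], mask, a, b)
# moves `mask` into kwargs under the key "where", so the call behaves like
# np.add(a, b, where=mask).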
def map_blocks(
func,
*args,
name=None,
token=None,
dtype=None,
chunks=None,
drop_axis=[],
new_axis=None,
meta=None,
**kwargs,
):
""" Map a function across all blocks of a dask array.
Parameters
----------
func : callable
Function to apply to every block in the array.
args : dask arrays or other objects
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present).
token : string, optional
The key prefix to use for the output array. If not provided, will be
determined from the function name.
name : string, optional
The key name to use for the output array. Note that this fully
specifies the output key name, and must be unique. If not provided,
will be determined by a hash of the arguments.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
See Also
--------
dask.array.blockwise : Generalized operation with control over block alignment.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to
add the necessary number of axes on the left.
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
    Your block function gets information about where it is in the array by
accepting a special ``block_info`` keyword argument.
>>> def func(block, block_info=None):
... pass
This will receive the following information:
>>> block_info # doctest: +SKIP
{0: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)]},
None: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)],
'chunk-shape': (100,),
'dtype': dtype('float64')}}
For each argument and keyword arguments that are dask arrays (the positions
of which are the first index), you will receive the shape of the full
array, the number of chunks of the full array in each dimension, the chunk
location (for example the fourth chunk over in the first dimension), and
the array location (for example the slice corresponding to ``40:50``). The
same information is provided for the output, with the key ``None``, plus
the shape and dtype that should be returned.
These features can be combined to synthesize an array from scratch, for
example:
>>> def func(block_info=None):
... loc = block_info[None]['array-location'][0]
... return np.arange(loc[0], loc[1])
>>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)
dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>
>>> _.compute()
array([0, 1, 2, 3, 4, 5, 6, 7])
You may specify the key name prefix of the resulting task in the graph with
the optional ``token`` keyword argument.
>>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP
dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>
"""
if not callable(func):
msg = (
"First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)"
)
raise TypeError(msg % type(func).__name__)
if token:
warnings.warn("The token= keyword to map_blocks has been moved to name=")
name = token
name = "%s-%s" % (name or funcname(func), tokenize(func, *args, **kwargs))
new_axes = {}
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis] # TODO: handle new_axis
arrs = [a for a in args if isinstance(a, Array)]
argpairs = [
(a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)
for a in args
]
if arrs:
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
else:
out_ind = ()
original_kwargs = kwargs
if dtype is None and meta is None:
dtype = apply_infer_dtype(func, args, original_kwargs, "map_blocks")
if drop_axis:
out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)
if new_axis is None and chunks is not None and len(out_ind) < len(chunks):
new_axis = range(len(chunks) - len(out_ind))
if new_axis:
# new_axis = [x + len(drop_axis) for x in new_axis]
out_ind = list(out_ind)
for ax in sorted(new_axis):
n = len(out_ind) + len(drop_axis)
out_ind.insert(ax, n)
if chunks is not None:
new_axes[n] = chunks[ax]
else:
new_axes[n] = 1
out_ind = tuple(out_ind)
if max(new_axis) > max(out_ind):
raise ValueError("New_axis values do not fill in all dimensions")
if chunks is not None:
if len(chunks) != len(out_ind):
raise ValueError(
"Provided chunks have {0} dims, expected {1} "
"dims.".format(len(chunks), len(out_ind))
)
adjust_chunks = dict(zip(out_ind, chunks))
else:
adjust_chunks = None
out = blockwise(
func,
out_ind,
*concat(argpairs),
name=name,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
extra_argpairs = []
extra_names = []
# If func has block_id as an argument, construct an array of block IDs and
# prepare to inject it.
if has_keyword(func, "block_id"):
block_id_name = "block-id-" + out.name
block_id_dsk = {
(block_id_name,) + block_id: block_id
for block_id in product(*(range(len(c)) for c in out.chunks))
}
block_id_array = Array(
block_id_dsk,
block_id_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_id_array, out_ind))
extra_names.append("block_id")
# If func has block_info as an argument, construct an array of block info
# objects and prepare to inject it.
if has_keyword(func, "block_info"):
starts = {}
num_chunks = {}
shapes = {}
for i, (arg, in_ind) in enumerate(argpairs):
if in_ind is not None:
shapes[i] = arg.shape
if drop_axis:
# We concatenate along dropped axes, so we need to treat them
# as if there is only a single chunk.
starts[i] = [
(
cached_cumsum(arg.chunks[j], initial_zero=True)
if ind in out_ind
else [0, arg.shape[j]]
)
for j, ind in enumerate(in_ind)
]
num_chunks[i] = tuple(len(s) - 1 for s in starts[i])
else:
starts[i] = [
cached_cumsum(c, initial_zero=True) for c in arg.chunks
]
num_chunks[i] = arg.numblocks
out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]
block_info_name = "block-info-" + out.name
block_info_dsk = {}
for block_id in product(*(range(len(c)) for c in out.chunks)):
# Get position of chunk, indexed by axis labels
location = {out_ind[i]: loc for i, loc in enumerate(block_id)}
info = {}
for i, shape in shapes.items():
# Compute chunk key in the array, taking broadcasting into
# account. We don't directly know which dimensions are
# broadcast, but any dimension with only one chunk can be
# treated as broadcast.
arr_k = tuple(
location.get(ind, 0) if num_chunks[i][j] > 1 else 0
for j, ind in enumerate(argpairs[i][1])
)
info[i] = {
"shape": shape,
"num-chunks": num_chunks[i],
"array-location": [
(starts[i][ij][j], starts[i][ij][j + 1])
for ij, j in enumerate(arr_k)
],
"chunk-location": arr_k,
}
info[None] = {
"shape": out.shape,
"num-chunks": out.numblocks,
"array-location": [
(out_starts[ij][j], out_starts[ij][j + 1])
for ij, j in enumerate(block_id)
],
"chunk-location": block_id,
"chunk-shape": tuple(
out.chunks[ij][j] for ij, j in enumerate(block_id)
),
"dtype": dtype,
}
block_info_dsk[(block_info_name,) + block_id] = info
block_info = Array(
block_info_dsk,
block_info_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_info, out_ind))
extra_names.append("block_info")
if extra_argpairs:
# Rewrite the Blockwise layer. It would be nice to find a way to
# avoid doing it twice, but it's currently needed to determine
# out.chunks from the first pass. Since it constructs a Blockwise
# rather than an expanded graph, it shouldn't be too expensive.
out = blockwise(
_pass_extra_kwargs,
out_ind,
func,
None,
tuple(extra_names),
None,
*concat(extra_argpairs),
*concat(argpairs),
name=out.name,
dtype=out.dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=dict(zip(out_ind, out.chunks)),
meta=meta,
**kwargs,
)
return out
def broadcast_chunks(*chunkss):
""" Construct a chunks tuple that broadcasts many chunks tuples
>>> a = ((5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((5, 5),)
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((1,), (5, 5),)
>>> broadcast_chunks(a, b)
((10, 10, 10), (5, 5))
>>> a = ((10, 10, 10), (5, 5),)
>>> b = ((3, 3,), (5, 5),)
>>> broadcast_chunks(a, b)
Traceback (most recent call last):
...
ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]
"""
if not chunkss:
return ()
elif len(chunkss) == 1:
return chunkss[0]
n = max(map(len, chunkss))
chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]
result = []
for i in range(n):
step1 = [c[i] for c in chunkss2]
if all(c == (1,) for c in step1):
step2 = step1
else:
step2 = [c for c in step1 if c != (1,)]
if len(set(step2)) != 1:
raise ValueError("Chunks do not align: %s" % str(step2))
result.append(step2[0])
return tuple(result)
def store(
sources,
targets,
lock=True,
regions=None,
compute=True,
return_stored=False,
**kwargs,
):
""" Store dask arrays in array-like objects, overwrite data in target
This stores dask arrays into object that supports numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or iterable of Arrays
targets: array-like or Delayed or iterable of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular ``threading.Lock`` object to be shared among all writes.
regions: tuple of slices or list of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately, return ``dask.delayed.Delayed`` otherwise
return_stored: boolean, optional
Optionally return the stored result (default False).
Examples
--------
>>> x = ... # doctest: +SKIP
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions]
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources)
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_dsk = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_dsk = Array.__dask_optimize__(
sources_dsk, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources2 = [Array(sources_dsk, e.name, e.chunks, meta=e) for e in sources]
# Optimize all targets together
targets2 = []
targets_keys = []
targets_dsk = []
for e in targets:
if isinstance(e, Delayed):
targets2.append(e.key)
targets_keys.extend(e.__dask_keys__())
targets_dsk.append(e.__dask_graph__())
elif is_dask_collection(e):
raise TypeError("Targets must be either Delayed objects or array-likes")
else:
targets2.append(e)
targets_dsk = HighLevelGraph.merge(*targets_dsk)
targets_dsk = Delayed.__dask_optimize__(targets_dsk, targets_keys)
load_stored = return_stored and not compute
toks = [str(uuid.uuid1()) for _ in range(len(sources))]
store_dsk = HighLevelGraph.merge(
*[
insert_to_ooc(s, t, lock, r, return_stored, load_stored, tok)
for s, t, r, tok in zip(sources2, targets2, regions, toks)
]
)
store_keys = list(store_dsk.keys())
store_dsk = HighLevelGraph.merge(store_dsk, targets_dsk, sources_dsk)
if return_stored:
load_store_dsk = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk) for k in store_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(store_keys, store_dsk, store_dsk_2)
result = tuple(
Array(load_store_dsk, "load-store-%s" % t, s.chunks, meta=s)
for s, t in zip(sources, toks)
)
return result
else:
name = "store-" + str(uuid.uuid1())
dsk = HighLevelGraph.merge({name: store_keys}, store_dsk)
result = Delayed(name, dsk)
if compute:
result.compute(**kwargs)
return None
else:
return result
def blockdims_from_blockshape(shape, chunks):
"""
>>> blockdims_from_blockshape((10, 10), (4, 3))
((4, 4, 2), (3, 3, 3, 1))
>>> blockdims_from_blockshape((10, 0), (4, 0))
((4, 4, 2), (0,))
"""
if chunks is None:
raise TypeError("Must supply chunks= keyword argument")
if shape is None:
raise TypeError("Must supply shape= keyword argument")
if np.isnan(sum(shape)) or np.isnan(sum(chunks)):
raise ValueError(
"Array chunk sizes are unknown. shape: %s, chunks: %s%s"
% (shape, chunks, unknown_chunk_message)
)
if not all(map(is_integer, chunks)):
raise ValueError("chunks can only contain integers.")
if not all(map(is_integer, shape)):
raise ValueError("shape can only contain integers.")
shape = tuple(map(int, shape))
chunks = tuple(map(int, chunks))
return tuple(
((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))
for d, bd in zip(shape, chunks)
)
def finalize(results):
if not results:
return concatenate3(results)
results2 = results
while isinstance(results2, (tuple, list)):
if len(results2) > 1:
return concatenate3(results)
else:
results2 = results2[0]
return unpack_singleton(results)
CHUNKS_NONE_ERROR_MESSAGE = """
You must specify a chunks= keyword argument.
This specifies the chunksize of your array blocks.
See the following documentation page for details:
https://docs.dask.org/en/latest/array-creation.html#chunks
""".strip()
class Array(DaskMethodsMixin):
""" Parallel Dask Array
A parallel nd-array comprised of many numpy arrays arranged in a grid.
This constructor is for advanced uses only. For normal use see the
``da.from_array`` function.
Parameters
----------
dask : dict
Task dependency graph
name : string
Name of array in dask
shape : tuple of ints
Shape of the entire array
chunks: iterable of tuples
block sizes along each dimension
dtype : str or dtype
Typecode or data-type for the new Dask Array
meta : empty ndarray
empty ndarray created with same NumPy backend, ndim and dtype as the
Dask Array being created (overrides dtype)
See Also
--------
dask.array.from_array
"""
__slots__ = "dask", "_name", "_cached_keys", "_chunks", "_meta"
def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):
self = super(Array, cls).__new__(cls)
assert isinstance(dask, Mapping)
if not isinstance(dask, HighLevelGraph):
dask = HighLevelGraph.from_collections(name, dask, dependencies=())
self.dask = dask
self.name = str(name)
meta = meta_from_array(meta, dtype=dtype)
if (
isinstance(chunks, str)
or isinstance(chunks, tuple)
and chunks
and any(isinstance(c, str) for c in chunks)
):
dt = meta.dtype
else:
dt = None
self._chunks = normalize_chunks(chunks, shape, dtype=dt)
if self._chunks is None:
raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)
self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)
for plugin in config.get("array_plugins", ()):
result = plugin(self)
if result is not None:
self = result
return self
def __reduce__(self):
return (Array, (self.dask, self.name, self.chunks, self.dtype))
def __dask_graph__(self):
return self.dask
def __dask_layers__(self):
return (self.name,)
def __dask_keys__(self):
if self._cached_keys is not None:
return self._cached_keys
name, chunks, numblocks = self.name, self.chunks, self.numblocks
def keys(*args):
if not chunks:
return [(name,)]
ind = len(args)
if ind + 1 == len(numblocks):
result = [(name,) + args + (i,) for i in range(numblocks[ind])]
else:
result = [keys(*(args + (i,))) for i in range(numblocks[ind])]
return result
self._cached_keys = result = keys()
return result
def __dask_tokenize__(self):
return self.name
__dask_optimize__ = globalmethod(
optimize, key="array_optimize", falsey=dont_optimize
)
__dask_scheduler__ = staticmethod(threaded.get)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return Array, (self.name, self.chunks, self.dtype, self._meta)
@property
def numblocks(self):
return tuple(map(len, self.chunks))
@property
def npartitions(self):
return reduce(mul, self.numblocks, 1)
def compute_chunk_sizes(self):
"""
Compute the chunk sizes for a Dask array. This is especially useful
when the chunk sizes are unknown (e.g., when indexing one Dask array
with another).
Notes
-----
This function modifies the Dask array in-place.
Examples
--------
>>> import dask.array as da
>>> import numpy as np
>>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)
>>> x.chunks
((2, 2, 1),)
>>> y = x[x <= 0]
>>> y.chunks
((nan, nan, nan),)
>>> y.compute_chunk_sizes() # in-place computation
dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
>>> y.chunks
((2, 1, 0),)
"""
x = self
chunk_shapes = x.map_blocks(
_get_chunk_shape,
dtype=int,
chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),
new_axis=x.ndim,
)
c = []
for i in range(x.ndim):
s = x.ndim * [0] + [i]
s[i] = slice(None)
s = tuple(s)
c.append(tuple(chunk_shapes[s]))
# `map_blocks` assigns numpy dtypes
# cast chunk dimensions back to python int before returning
x._chunks = tuple(
[tuple([int(chunk) for chunk in chunks]) for chunks in compute(tuple(c))[0]]
)
return x
@property
def shape(self):
return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)
@property
def chunksize(self):
return tuple(max(c) for c in self.chunks)
@property
def dtype(self):
return self._meta.dtype
def _get_chunks(self):
return self._chunks
def _set_chunks(self, chunks):
msg = (
"Can not set chunks directly\n\n"
"Please use the rechunk method instead:\n"
" x.rechunk({})\n\n"
"If trying to avoid unknown chunks, use\n"
" x.compute_chunk_sizes()"
)
raise TypeError(msg.format(chunks))
chunks = property(_get_chunks, _set_chunks, "chunks property")
def __len__(self):
if not self.chunks:
raise TypeError("len() of unsized object")
return sum(self.chunks[0])
def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, (np.ndarray, Number, Array)):
return NotImplemented
if method == "__call__":
if numpy_ufunc is np.matmul:
from .routines import matmul
# special case until apply_gufunc handles optional dimensions
return matmul(*inputs, **kwargs)
if numpy_ufunc.signature is not None:
from .gufunc import apply_gufunc
return apply_gufunc(
numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs
)
if numpy_ufunc.nout > 1:
from . import ufunc
try:
da_ufunc = getattr(ufunc, numpy_ufunc.__name__)
except AttributeError:
return NotImplemented
return da_ufunc(*inputs, **kwargs)
else:
return elemwise(numpy_ufunc, *inputs, **kwargs)
elif method == "outer":
from . import ufunc
try:
da_ufunc = getattr(ufunc, numpy_ufunc.__name__)
except AttributeError:
return NotImplemented
return da_ufunc.outer(*inputs, **kwargs)
else:
return NotImplemented
def __repr__(self):
"""
>>> import dask.array as da
>>> da.ones((10, 10), chunks=(5, 5), dtype='i4')
dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>
"""
chunksize = str(self.chunksize)
name = self.name.rsplit("-", 1)[0]
return "dask.array<%s, shape=%s, dtype=%s, chunksize=%s, chunktype=%s.%s>" % (
name,
self.shape,
self.dtype,
chunksize,
type(self._meta).__module__.split(".")[0],
type(self._meta).__name__,
)
def _repr_html_(self):
table = self._repr_html_table()
try:
grid = self.to_svg(size=config.get("array.svg.size", 120))
except NotImplementedError:
grid = ""
both = [
"<table>",
"<tr>",
"<td>",
table,
"</td>",
"<td>",
grid,
"</td>",
"</tr>",
"</table>",
]
return "\n".join(both)
def _repr_html_table(self):
if "sparse" in typename(type(self._meta)):
nbytes = None
cbytes = None
elif not math.isnan(self.nbytes):
nbytes = format_bytes(self.nbytes)
cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)
else:
nbytes = "unknown"
cbytes = "unknown"
table = [
"<table>",
" <thead>",
" <tr><td> </td><th> Array </th><th> Chunk </th></tr>",
" </thead>",
" <tbody>",
" <tr><th> Bytes </th><td> %s </td> <td> %s </td></tr>"
% (nbytes, cbytes)
if nbytes is not None
else "",
" <tr><th> Shape </th><td> %s </td> <td> %s </td></tr>"
% (str(self.shape), str(self.chunksize)),
" <tr><th> Count </th><td> %d Tasks </td><td> %d Chunks </td></tr>"
% (len(self.__dask_graph__()), self.npartitions),
" <tr><th> Type </th><td> %s </td><td> %s.%s </td></tr>"
% (
self.dtype,
type(self._meta).__module__.split(".")[0],
type(self._meta).__name__,
),
" </tbody>",
"</table>",
]
return "\n".join(table)
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
""" Number of elements in array """
return reduce(mul, self.shape, 1)
@property
def nbytes(self):
""" Number of bytes in array """
return self.size * self.dtype.itemsize
@property
def itemsize(self):
""" Length of one array element in bytes """
return self.dtype.itemsize
@property
def name(self):
return self._name
@name.setter
def name(self, val):
self._name = val
# Clear the key cache when the name is reset
self._cached_keys = None
__array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
if not isinstance(x, np.ndarray):
x = np.array(x)
return x
def __array_function__(self, func, types, args, kwargs):
import dask.array as module
def handle_nonmatching_names(func, args, kwargs):
if func not in _HANDLED_FUNCTIONS:
warnings.warn(
"The `{}` function is not implemented by Dask array. "
"You may want to use the da.map_blocks function "
"or something similar to silence this warning. "
"Your code may stop working in a future release.".format(
func.__module__ + "." + func.__name__
),
FutureWarning,
)
# Need to convert to array object (e.g. numpy.ndarray or
# cupy.ndarray) as needed, so we can call the NumPy function
# again and it gets the chance to dispatch to the right
# implementation.
args, kwargs = compute(args, kwargs)
return func(*args, **kwargs)
return _HANDLED_FUNCTIONS[func](*args, **kwargs)
# First try to find a matching function name. If that doesn't work, we may
# be dealing with an alias or a function that's simply not in the Dask API.
# Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.
for submodule in func.__module__.split(".")[1:]:
try:
module = getattr(module, submodule)
except AttributeError:
return handle_nonmatching_names(func, args, kwargs)
if not hasattr(module, func.__name__):
return handle_nonmatching_names(func, args, kwargs)
da_func = getattr(module, func.__name__)
if da_func is func:
return handle_nonmatching_names(func, args, kwargs)
return da_func(*args, **kwargs)
@property
def _elemwise(self):
return elemwise
@wraps(store)
def store(self, target, **kwargs):
r = store([self], [target], **kwargs)
if kwargs.get("return_stored", False):
r = r[0]
return r
def to_svg(self, size=500):
""" Convert chunks from Dask Array into an SVG Image
Parameters
----------
size: int
Rough size of the image
Examples
--------
>>> x.to_svg(size=500) # doctest: +SKIP
Returns
-------
text: An svg string depicting the array as a grid of chunks
"""
from .svg import svg
return svg(self.chunks, size=size)
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP
See Also
--------
da.store
h5py.File.create_dataset
"""
return to_hdf5(filename, datapath, self, **kwargs)
def to_dask_dataframe(self, columns=None, index=None, meta=None):
""" Convert dask Array to dask Dataframe
Parameters
----------
columns: list or string
list of column names if DataFrame, single string if Series
index : dask.dataframe.Index, optional
An optional *dask* Index to use for the output Series or DataFrame.
The default output index depends on whether the array has any unknown
chunks. If there are any unknown chunks, the output has ``None``
for all the divisions (one per chunk). If all the chunks are known,
a default index with known divisions is created.
Specifying ``index`` can be useful if you're conforming a Dask Array
to an existing dask Series or DataFrame, and you would like the
indices to match.
meta : object, optional
An optional `meta` parameter can be passed for dask
to specify the concrete dataframe type to use for partitions of
the Dask dataframe. By default, pandas DataFrame is used.
See Also
--------
dask.dataframe.from_dask_array
"""
from ..dataframe import from_dask_array
return from_dask_array(self, columns=columns, index=index, meta=meta)
def __bool__(self):
if self.size > 1:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.any() or a.all().".format(self.__class__.__name__)
)
else:
return bool(self.compute())
__nonzero__ = __bool__ # python 2
def _scalarfunc(self, cast_type):
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted to Python scalars")
else:
return cast_type(self.compute())
def __int__(self):
return self._scalarfunc(int)
__long__ = __int__ # python 2
def __float__(self):
return self._scalarfunc(float)
def __complex__(self):
return self._scalarfunc(complex)
def __setitem__(self, key, value):
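# Note: only boolean-mask assignment is currently supported here. ``x[mask] = value``
# is implemented as ``where(mask, value, x)`` and the result replaces this array's
# graph, name, and chunks in place.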
from .routines import where
if isinstance(key, Array):
if isinstance(value, Array) and value.ndim > 1:
raise ValueError("boolean index array should have 1 dimension")
y = where(key, value, self)
self._meta = y._meta
self.dask = y.dask
self.name = y.name
self._chunks = y.chunks
return self
else:
raise NotImplementedError(
"Item assignment with %s not supported" % type(key)
)
def __getitem__(self, index):
# Field access, e.g. x['a'] or x[['a', 'b']]
if isinstance(index, str) or (
isinstance(index, list) and index and all(isinstance(i, str) for i in index)
):
if isinstance(index, str):
dt = self.dtype[index]
else:
dt = _make_sliced_dtype(self.dtype, index)
if dt.shape:
new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))
chunks = self.chunks + tuple((i,) for i in dt.shape)
return self.map_blocks(
getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis
)
else:
return self.map_blocks(getitem, index, dtype=dt)
if not isinstance(index, tuple):
index = (index,)
from .slicing import (
normalize_index,
slice_with_int_dask_array,
slice_with_bool_dask_array,
)
index2 = normalize_index(index, self.shape)
dependencies = {self.name}
for i in index2:
if isinstance(i, Array):
dependencies.add(i.name)
if any(isinstance(i, Array) and i.dtype.kind in "iu" for i in index2):
self, index2 = slice_with_int_dask_array(self, index2)
if any(isinstance(i, Array) and i.dtype == bool for i in index2):
self, index2 = slice_with_bool_dask_array(self, index2)
if all(isinstance(i, slice) and i == slice(None) for i in index2):
return self
out = "getitem-" + tokenize(self, index2)
dsk, chunks = slice_array(out, self.name, self.chunks, index2)
graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])
meta = meta_from_array(self._meta, ndim=len(chunks))
if np.isscalar(meta):
meta = np.array(meta)
return Array(graph, out, chunks, meta=meta)
def _vindex(self, key):
if not isinstance(key, tuple):
key = (key,)
if any(k is None for k in key):
raise IndexError(
"vindex does not support indexing with None (np.newaxis), "
"got {}".format(key)
)
if all(isinstance(k, slice) for k in key):
if all(
k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)
):
return self
raise IndexError(
"vindex requires at least one non-slice to vectorize over "
"when the slices are not over the entire array (i.e, x[:]). "
"Use normal slicing instead when only using slices. Got: {}".format(key)
)
return _vindex(self, *key)
@property
def vindex(self):
"""Vectorized indexing with broadcasting.
This is equivalent to numpy's advanced indexing, using arrays that are
broadcast against each other. This allows for pointwise indexing:
>>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> x = from_array(x, chunks=2)
>>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()
array([1, 5, 9])
Mixed basic/advanced indexing with slices/arrays is also supported. The
order of dimensions in the result follows those proposed for
`ndarray.vindex <https://github.com/numpy/numpy/pull/6256>`_:
the subspace spanned by arrays is followed by all slices.
Note: ``vindex`` provides more general functionality than standard
indexing, but it also has fewer optimizations and can be significantly
slower.
"""
return IndexCallable(self._vindex)
def _blocks(self, index):
from .slicing import normalize_index
if not isinstance(index, tuple):
index = (index,)
if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:
raise ValueError("Can only slice with a single list")
if any(ind is None for ind in index):
raise ValueError("Slicing with np.newaxis or None is not supported")
index = normalize_index(index, self.numblocks)
index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)
name = "blocks-" + tokenize(self, index)
new_keys = np.array(self.__dask_keys__(), dtype=object)[index]
chunks = tuple(
tuple(np.array(c)[i].tolist()) for c, i in zip(self.chunks, index)
)
keys = product(*(range(len(c)) for c in chunks))
layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])
return Array(graph, name, chunks, meta=self)
@property
def blocks(self):
""" Slice an array by blocks
This allows blockwise slicing of a Dask array. You can perform normal
Numpy-style slicing but now rather than slice elements of the array you
slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new
dask array with every other block in the first row of blocks.
You can index blocks in any way that could index a numpy array of shape
equal to the number of blocks in each dimension, (available as
array.numblocks). The dimension of the output array will be the same
as the dimension of this array, even if integer indices are passed.
This does not support slicing with ``np.newaxis`` or multiple lists.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(10, chunks=2)
>>> x.blocks[0].compute()
array([0, 1])
>>> x.blocks[:3].compute()
array([0, 1, 2, 3, 4, 5])
>>> x.blocks[::2].compute()
array([0, 1, 4, 5, 8, 9])
>>> x.blocks[[-1, 0]].compute()
array([8, 9, 0, 1])
Returns
-------
A Dask array
"""
return IndexCallable(self._blocks)
@property
def partitions(self):
"""Slice an array by partitions. Alias of dask array .blocks attribute.
This alias allows you to write agnostic code that works with both
dask arrays and dask dataframes.
This allows blockwise slicing of a Dask array. You can perform normal
Numpy-style slicing but now rather than slice elements of the array you
slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new
dask array with every other block in the first row of blocks.
You can index blocks in any way that could index a numpy array of shape
equal to the number of blocks in each dimension, (available as
array.numblocks). The dimension of the output array will be the same
as the dimension of this array, even if integer indices are passed.
This does not support slicing with ``np.newaxis`` or multiple lists.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(10, chunks=2)
>>> x.partitions[0].compute()
array([0, 1])
>>> x.partitions[:3].compute()
array([0, 1, 2, 3, 4, 5])
>>> x.partitions[::2].compute()
array([0, 1, 4, 5, 8, 9])
>>> x.partitions[[-1, 0]].compute()
array([8, 9, 0, 1])
>>> all(x.partitions[:].compute() == x.blocks[:].compute())
True
Returns
-------
A Dask array
"""
return self.blocks
@derived_from(np.ndarray)
def dot(self, other):
from .routines import tensordot
return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))
@property
def A(self):
return self
@property
def T(self):
return self.transpose()
@derived_from(np.ndarray)
def transpose(self, *axes):
from .routines import transpose
if not axes:
axes = None
elif len(axes) == 1 and isinstance(axes[0], Iterable):
axes = axes[0]
if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):
# no transpose necessary
return self
else:
return transpose(self, axes=axes)
@derived_from(np.ndarray)
def ravel(self):
from .routines import ravel
return ravel(self)
flatten = ravel
@derived_from(np.ndarray)
def choose(self, choices):
from .routines import choose
return choose(self, choices)
@derived_from(np.ndarray)
def reshape(self, *shape):
from .reshape import reshape
if len(shape) == 1 and not isinstance(shape[0], Number):
shape = shape[0]
return reshape(self, shape)
def topk(self, k, axis=-1, split_every=None):
"""The top k elements of an array.
See ``da.topk`` for docstring"""
from .reductions import topk
return topk(self, k, axis=axis, split_every=split_every)
def argtopk(self, k, axis=-1, split_every=None):
"""The indices of the top k elements of an array.
See ``da.argtopk`` for docstring"""
from .reductions import argtopk
return argtopk(self, k, axis=axis, split_every=split_every)
def astype(self, dtype, **kwargs):
"""Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
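Examples
--------
A minimal illustrative sketch (small array and dtypes chosen for the example):
>>> import dask.array as da
>>> x = da.ones(5, chunks=2, dtype='i4')
>>> x.astype('f8').dtype
dtype('float64')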
"""
# Scalars don't take `casting` or `copy` kwargs - as such we only pass
# them to `map_blocks` if specified by user (different than defaults).
extra = set(kwargs) - {"casting", "copy"}
if extra:
raise TypeError(
"astype does not take the following keyword "
"arguments: {0!s}".format(list(extra))
)
casting = kwargs.get("casting", "unsafe")
dtype = np.dtype(dtype)
if self.dtype == dtype:
return self
elif not np.can_cast(self.dtype, dtype, casting=casting):
raise TypeError(
"Cannot cast array from {0!r} to {1!r}"
" according to the rule "
"{2!r}".format(self.dtype, dtype, casting)
)
return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.invert, self)
def __lshift__(self, other):
return elemwise(operator.lshift, self, other)
def __rlshift__(self, other):
return elemwise(operator.lshift, other, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __pos__(self):
return self
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __rshift__(self, other):
return elemwise(operator.rshift, self, other)
def __rrshift__(self, other):
return elemwise(operator.rshift, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
def __matmul__(self, other):
from .routines import matmul
return matmul(self, other)
def __rmatmul__(self, other):
from .routines import matmul
return matmul(other, self)
def __divmod__(self, other):
from .ufunc import divmod
return divmod(self, other)
def __rdivmod__(self, other):
from .ufunc import divmod
return divmod(other, self)
@derived_from(np.ndarray)
def any(self, axis=None, keepdims=False, split_every=None, out=None):
from .reductions import any
return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)
@derived_from(np.ndarray)
def all(self, axis=None, keepdims=False, split_every=None, out=None):
from .reductions import all
return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)
@derived_from(np.ndarray)
def min(self, axis=None, keepdims=False, split_every=None, out=None):
from .reductions import min
return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)
@derived_from(np.ndarray)
def max(self, axis=None, keepdims=False, split_every=None, out=None):
from .reductions import max
return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)
@derived_from(np.ndarray)
def argmin(self, axis=None, split_every=None, out=None):
from .reductions import argmin
return argmin(self, axis=axis, split_every=split_every, out=out)
@derived_from(np.ndarray)
def argmax(self, axis=None, split_every=None, out=None):
from .reductions import argmax
return argmax(self, axis=axis, split_every=split_every, out=out)
@derived_from(np.ndarray)
def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
from .reductions import sum
return sum(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
split_every=split_every,
out=out,
)
@derived_from(np.ndarray)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
from .reductions import trace
return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
@derived_from(np.ndarray)
def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
from .reductions import prod
return prod(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
split_every=split_every,
out=out,
)
@derived_from(np.ndarray)
def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
from .reductions import mean
return mean(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
split_every=split_every,
out=out,
)
@derived_from(np.ndarray)
def std(
self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
from .reductions import std
return std(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
ddof=ddof,
split_every=split_every,
out=out,
)
@derived_from(np.ndarray)
def var(
self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
from .reductions import var
return var(
self,
axis=axis,
dtype=dtype,
keepdims=keepdims,
ddof=ddof,
split_every=split_every,
out=out,
)
def moment(
self,
order,
axis=None,
dtype=None,
keepdims=False,
ddof=0,
split_every=None,
out=None,
):
"""Calculate the nth centralized moment.
Parameters
----------
order : int
Order of the moment that is returned, must be >= 2.
axis : int, optional
Axis along which the central moment is computed. The default is to
compute the moment of the flattened array.
dtype : data-type, optional
Type to use in computing the moment. For arrays of integer type the
default is float64; for arrays of float types it is the same as the
array type.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
N - ddof, where N represents the number of elements. By default
ddof is zero.
Returns
-------
moment : ndarray
References
----------
.. [1] Pebay, Philippe (2008), "Formulas for Robust, One-Pass Parallel
Computation of Covariances and Arbitrary-Order Statistical Moments",
Technical Report SAND2008-6212, Sandia National Laboratories.
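Examples
--------
An illustrative sketch; the second central moment with ``ddof=0`` equals the
population variance of the data:
>>> import dask.array as da
>>> x = da.from_array([1.0, 2.0, 3.0, 4.0], chunks=2)
>>> float(x.moment(2).compute())
1.25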
"""
from .reductions import moment
return moment(
self,
order,
axis=axis,
dtype=dtype,
keepdims=keepdims,
ddof=ddof,
split_every=split_every,
out=out,
)
@wraps(map_blocks)
def map_blocks(self, func, *args, **kwargs):
return map_blocks(func, self, *args, **kwargs)
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
""" Map a function over blocks of the array with some overlap
We share neighboring zones between blocks of the array, then map a
function, then trim away the neighboring strips.
Parameters
----------
func: function
The function to apply to each extended block
depth: int, tuple, or dict
The number of elements that each block should share with its neighbors
If a tuple or dict then this can be different per axis
boundary: str, tuple, dict
How to handle the boundaries.
Values include 'reflect', 'periodic', 'nearest', 'none',
or any constant value like 0 or np.nan
trim: bool
Whether or not to trim ``depth`` elements from each block after
calling the map function.
Set this to False if your mapping function already does this for you
**kwargs:
Other keyword arguments valid in ``map_blocks``
Examples
--------
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = from_array(x, chunks=5)
>>> def derivative(x):
... return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])
>>> import dask.array as da
>>> x = np.arange(16).reshape((4, 4))
>>> d = da.from_array(x, chunks=(2, 2))
>>> d.map_overlap(lambda x: x + x.size, depth=1).compute()
array([[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27],
[28, 29, 30, 31]])
>>> func = lambda x: x + x.size
>>> depth = {0: 1, 1: 1}
>>> boundary = {0: 'reflect', 1: 'none'}
>>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE
array([[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23],
[24, 25, 26, 27]])
"""
from .overlap import map_overlap
return map_overlap(
func, self, depth=depth, boundary=boundary, trim=trim, **kwargs
)
@derived_from(np.ndarray)
def cumsum(self, axis, dtype=None, out=None):
from .reductions import cumsum
return cumsum(self, axis, dtype, out=out)
@derived_from(np.ndarray)
def cumprod(self, axis, dtype=None, out=None):
from .reductions import cumprod
return cumprod(self, axis, dtype, out=out)
@derived_from(np.ndarray)
def squeeze(self, axis=None):
from .routines import squeeze
return squeeze(self, axis)
def rechunk(self, chunks="auto", threshold=None, block_size_limit=None):
""" See da.rechunk for docstring """
from . import rechunk # avoid circular import
return rechunk(self, chunks, threshold, block_size_limit)
@property
def real(self):
from .ufunc import real
return real(self)
@property
def imag(self):
from .ufunc import imag
return imag(self)
def conj(self):
from .ufunc import conj
return conj(self)
@derived_from(np.ndarray)
def clip(self, min=None, max=None):
from .ufunc import clip
return clip(self, min, max)
def view(self, dtype=None, order="C"):
""" Get a view of the array as a new data type
Parameters
----------
dtype:
The dtype by which to view the array.
The default, None, results in the view having the same data-type
as the original array.
order: string
'C' or 'F' (Fortran) ordering
This reinterprets the bytes of the array under a new dtype. If that
dtype does not have the same size as the original array then the shape
will change.
Beware that both numpy and dask.array can behave oddly when taking
shape-changing views of arrays under Fortran ordering. Under some
versions of NumPy this function will fail when taking shape-changing
views of Fortran ordered arrays if the first dimension has chunks of
size one.
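Examples
--------
A small sketch reinterpreting int64 data as int32; each 8-byte element becomes
two 4-byte elements, so the last dimension doubles:
>>> import dask.array as da
>>> x = da.arange(4, chunks=2, dtype='i8')
>>> x.view('i4').shape
(8,)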
"""
if dtype is None:
dtype = self.dtype
else:
dtype = np.dtype(dtype)
mult = self.dtype.itemsize / dtype.itemsize
if order == "C":
chunks = self.chunks[:-1] + (
tuple(ensure_int(c * mult) for c in self.chunks[-1]),
)
elif order == "F":
chunks = (
tuple(ensure_int(c * mult) for c in self.chunks[0]),
) + self.chunks[1:]
else:
raise ValueError("Order must be one of 'C' or 'F'")
return self.map_blocks(
chunk.view, dtype, order=order, dtype=dtype, chunks=chunks
)
@derived_from(np.ndarray)
def swapaxes(self, axis1, axis2):
from .routines import swapaxes
return swapaxes(self, axis1, axis2)
@derived_from(np.ndarray)
def round(self, decimals=0):
from .routines import round
return round(self, decimals=decimals)
def copy(self):
"""
Copy array. This is a no-op for dask.arrays, which are immutable
"""
if self.npartitions == 1:
return self.map_blocks(M.copy)
else:
return Array(self.dask, self.name, self.chunks, meta=self)
def __deepcopy__(self, memo):
c = self.copy()
memo[id(self)] = c
return c
def to_delayed(self, optimize_graph=True):
"""Convert into an array of ``dask.delayed`` objects, one per chunk.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
See Also
--------
dask.array.from_delayed
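Examples
--------
A minimal sketch (one delayed object per chunk):
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> d = x.to_delayed()
>>> d.shape
(2,)
>>> d[0].compute()
array([0, 1, 2])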
"""
keys = self.__dask_keys__()
graph = self.__dask_graph__()
if optimize_graph:
graph = self.__dask_optimize__(graph, keys) # TODO, don't collapse graph
name = "delayed-" + self.name
graph = HighLevelGraph.from_collections(name, graph, dependencies=())
L = ndeepmap(self.ndim, lambda k: Delayed(k, graph), keys)
return np.array(L, dtype=object)
@derived_from(np.ndarray)
def repeat(self, repeats, axis=None):
from .creation import repeat
return repeat(self, repeats, axis=axis)
@derived_from(np.ndarray)
def nonzero(self):
from .routines import nonzero
return nonzero(self)
def to_zarr(self, *args, **kwargs):
"""Save array to the zarr storage format
See https://zarr.readthedocs.io for details about the format.
See function ``to_zarr()`` for parameters.
"""
return to_zarr(self, *args, **kwargs)
def to_tiledb(self, uri, *args, **kwargs):
"""Save array to the TileDB storage manager
See function ``to_tiledb()`` for argument documentation.
See https://docs.tiledb.io for details about the format and engine.
"""
from .tiledb_io import to_tiledb
return to_tiledb(self, uri, *args, **kwargs)
def ensure_int(f):
i = int(f)
if i != f:
raise ValueError("Could not coerce %f to integer" % f)
return i
def normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):
""" Normalize chunks to tuple of tuples
This takes in a variety of input types and information and produces a full
tuple-of-tuples result for chunks, suitable to be passed to Array or
rechunk or any other operation that creates a Dask array.
Parameters
----------
chunks: tuple, int, dict, or string
The chunks to be normalized. See examples below for more details
shape: Tuple[int]
The shape of the array
limit: int (optional)
The maximum block size to target in bytes,
if freedom is given to choose
dtype: np.dtype
previous_chunks: Tuple[Tuple[int]] optional
Chunks from a previous array that we should use for inspiration when
rechunking auto dimensions. If not provided but auto-chunking exists
then auto-dimensions will prefer square-like chunk shapes.
Examples
--------
Specify uniform chunk sizes
>>> normalize_chunks((2, 2), shape=(5, 6))
((2, 2, 1), (2, 2, 2))
Also passes through fully explicit tuple-of-tuples
>>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))
((2, 2, 1), (2, 2, 2))
Cleans up lists to tuples
>>> normalize_chunks([[2, 2], [3, 3]])
((2, 2), (3, 3))
Expands integer inputs 10 -> (10, 10)
>>> normalize_chunks(10, shape=(30, 5))
((10, 10, 10), (5,))
Expands dict inputs
>>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))
((2, 2, 2), (3, 3))
The values -1 and None get mapped to full size
>>> normalize_chunks((5, -1), shape=(10, 10))
((5, 5), (10,))
Use the value "auto" to automatically determine chunk sizes along certain
dimensions. This uses the ``limit=`` and ``dtype=`` keywords to
determine how large to make the chunks. The term "auto" can be used
anywhere an integer can be used. See array chunking documentation for more
information.
>>> normalize_chunks(("auto",), shape=(20,), limit=5, dtype='uint8')
((5, 5, 5, 5),)
You can also use byte sizes (see ``dask.utils.parse_bytes``) in place of
"auto" to ask for a particular size
>>> normalize_chunks("1kiB", shape=(2000,), dtype='float32')
((250, 250, 250, 250, 250, 250, 250, 250),)
Respects null dimensions
>>> normalize_chunks((), shape=(0, 0))
((0,), (0,))
"""
if dtype and not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if chunks is None:
raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)
if isinstance(chunks, list):
chunks = tuple(chunks)
if isinstance(chunks, (Number, str)):
chunks = (chunks,) * len(shape)
if isinstance(chunks, dict):
chunks = tuple(chunks.get(i, None) for i in range(len(shape)))
if isinstance(chunks, np.ndarray):
chunks = chunks.tolist()
if not chunks and shape and all(s == 0 for s in shape):
chunks = ((0,),) * len(shape)
if (
shape
and len(shape) == 1
and len(chunks) > 1
and all(isinstance(c, (Number, str)) for c in chunks)
):
chunks = (chunks,)
if shape and len(chunks) != len(shape):
raise ValueError(
"Chunks and shape must be of the same length/dimension. "
"Got chunks=%s, shape=%s" % (chunks, shape)
)
if -1 in chunks or None in chunks:
chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))
# If specifying chunk size in bytes, use that value to set the limit.
# Verify there is only one consistent value of limit or chunk-bytes used.
for c in chunks:
if isinstance(c, str) and c != "auto":
parsed = parse_bytes(c)
if limit is None:
limit = parsed
elif parsed != limit:
raise ValueError(
"Only one consistent value of limit or chunk is allowed."
"Used %s != %s" % (parsed, limit)
)
# Substitute byte limits with 'auto' now that limit is set.
chunks = tuple("auto" if isinstance(c, str) and c != "auto" else c for c in chunks)
if any(c == "auto" for c in chunks):
chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)
if shape is not None:
chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))
if chunks and shape is not None:
chunks = sum(
(
blockdims_from_blockshape((s,), (c,))
if not isinstance(c, (tuple, list))
else (c,)
for s, c in zip(shape, chunks)
),
(),
)
for c in chunks:
if not c:
raise ValueError(
"Empty tuples are not allowed in chunks. Express "
"zero length dimensions with 0(s) in chunks"
)
if shape is not None:
if len(chunks) != len(shape):
raise ValueError(
"Input array has %d dimensions but the supplied "
"chunks has only %d dimensions" % (len(shape), len(chunks))
)
if not all(
c == s or (math.isnan(c) or math.isnan(s))
for c, s in zip(map(sum, chunks), shape)
):
raise ValueError(
"Chunks do not add up to shape. "
"Got chunks=%s, shape=%s" % (chunks, shape)
)
return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)
def _compute_multiplier(limit: int, dtype, largest_block: int, result):
"""
Utility function for auto_chunks, to find how much larger or smaller the ideal
chunk size is relative to what we have now.
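A small worked example (``result`` is assumed to map each "auto" axis to its
current chunk size): with an 800-byte limit, float64 data, no fixed block and a
current size of 10, the ideal chunk is 10x larger than what we have:
>>> _compute_multiplier(800, np.dtype('f8'), 1, {0: 10})
10.0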
"""
return (
limit
/ dtype.itemsize
/ largest_block
/ np.prod(list(r if r != 0 else 1 for r in result.values()))
)
def auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):
""" Determine automatic chunks
This takes in a chunks value that contains ``"auto"`` values in certain
dimensions and replaces those values with concrete dimension sizes that try
to get chunks to be of a certain size in bytes, provided by the ``limit=``
keyword. If multiple dimensions are marked as ``"auto"`` then they will
all be adjusted to meet the desired byte limit, trying to respect the aspect
ratio of their dimensions in ``previous_chunks=``, if given.
Parameters
----------
chunks: Tuple
A tuple of either dimensions or tuples of explicit chunk dimensions
Some entries should be "auto"
shape: Tuple[int]
limit: int, str
The maximum allowable size of a chunk in bytes
previous_chunks: Tuple[Tuple[int]]
See also
--------
normalize_chunks: for full docstring and parameters
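Examples
--------
An illustrative sketch; the exact sizes depend on the rounding performed by
``round_to`` against the dimension's factors:
>>> auto_chunks(("auto",), (1000,), 1024, np.dtype('f8'))  # doctest: +SKIP
(125,)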
"""
if previous_chunks is not None:
previous_chunks = tuple(
c if isinstance(c, tuple) else (c,) for c in previous_chunks
)
chunks = list(chunks)
autos = {i for i, c in enumerate(chunks) if c == "auto"}
if not autos:
return tuple(chunks)
if limit is None:
limit = config.get("array.chunk-size")
if isinstance(limit, str):
limit = parse_bytes(limit)
if dtype is None:
raise TypeError("DType must be known for auto-chunking")
if dtype.hasobject:
raise NotImplementedError(
"Can not use auto rechunking with object dtype. "
"We are unable to estimate the size in bytes of object data"
)
for x in tuple(chunks) + tuple(shape):
if (
isinstance(x, Number)
and np.isnan(x)
or isinstance(x, tuple)
and np.isnan(x).any()
):
raise ValueError(
"Can not perform automatic rechunking with unknown "
"(nan) chunk sizes.%s" % unknown_chunk_message
)
limit = max(1, limit)
largest_block = np.prod(
[cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != "auto"]
)
if previous_chunks:
# Base ideal ratio on the median chunk size of the previous chunks
result = {a: np.median(previous_chunks[a]) for a in autos}
ideal_shape = []
for i, s in enumerate(shape):
chunk_frequencies = frequencies(previous_chunks[i])
mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])
if mode > 1 and count >= len(previous_chunks[i]) / 2:
ideal_shape.append(mode)
else:
ideal_shape.append(s)
# How much larger or smaller the ideal chunk size is relative to what we have now
multiplier = _compute_multiplier(limit, dtype, largest_block, result)
last_multiplier = 0
last_autos = set()
while (
multiplier != last_multiplier or autos != last_autos
): # while things change
last_multiplier = multiplier # record previous values
last_autos = set(autos) # record previous values
# Expand or contract each of the dimensions appropriately
for a in sorted(autos):
if ideal_shape[a] == 0:
result[a] = 0
continue
proposed = result[a] * multiplier ** (1 / len(autos))
if proposed > shape[a]: # we've hit the shape boundary
autos.remove(a)
largest_block *= shape[a]
chunks[a] = shape[a]
del result[a]
else:
result[a] = round_to(proposed, ideal_shape[a])
# recompute how much multiplier we have left, repeat
multiplier = _compute_multiplier(limit, dtype, largest_block, result)
for k, v in result.items():
chunks[k] = v
return tuple(chunks)
else:
size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))
small = [i for i in autos if shape[i] < size]
if small:
for i in small:
chunks[i] = (shape[i],)
return auto_chunks(chunks, shape, limit, dtype)
for i in autos:
chunks[i] = round_to(size, shape[i])
return tuple(chunks)
def round_to(c, s):
""" Return a chunk dimension that is close to an even multiple or factor
We want values for c that are nicely aligned with s.
If c is smaller than s then we want the largest factor of s that is less than the
desired chunk size, but not less than half, which is too much. If no such
factor exists then we just go with the original chunk size and accept an
uneven chunk at the end.
If c is larger than s then we want the largest multiple of s that is still
smaller than c.
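Examples
--------
Illustrative calls; the first result depends on the available factors of ``s``,
the second is an exact multiple since ``c > s``:
>>> round_to(5, 12)  # doctest: +SKIP
4
>>> round_to(25, 10)
20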
"""
if c <= s:
try:
return max(f for f in factors(s) if c / 2 <= f <= c)
except ValueError: # no matching factors within factor of two
return max(1, int(c))
else:
return c // s * s
def _get_chunk_shape(a):
s = np.asarray(a.shape, dtype=int)
return s[len(s) * (None,) + (slice(None),)]
def from_array(
x,
chunks="auto",
name=None,
lock=False,
asarray=None,
fancy=True,
getitem=None,
meta=None,
):
""" Create dask array from something that looks like an array
Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.
Parameters
----------
x : array_like
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
- A size in bytes, like "100 MiB" which will choose a uniform
block-like shape
- The word "auto" which acts like the above, but uses a configuration
value ``array.chunk-size`` for the chunk size
-1 or None as a blocksize indicates the full size of the corresponding
dimension.
name : str, optional
The key name to use for the array. Defaults to a hash of ``x``.
By default, hash uses python's standard sha1. This behaviour can be
changed by installing cityhash, xxhash or murmurhash. If installed,
a large-factor speedup can be obtained in the tokenisation step.
Use ``name=False`` to generate a random name instead of hashing (fast)
.. note::
Because this ``name`` is used as the key in task graphs, you should
ensure that it uniquely identifies the data contained within. If
you'd like to provide a descriptive name that is still unique, combine
the descriptive name with :func:`dask.base.tokenize` of the
``array_like``. See :ref:`graphs` for more.
lock : bool or Lock, optional
If ``x`` doesn't support concurrent reads then provide a lock here, or
pass in True to have dask.array create one for you.
asarray : bool, optional
If True then call np.asarray on chunks to convert them to numpy arrays.
If False then chunks are passed through unchanged.
If None (default) then we use True if the ``__array_function__`` method
is undefined.
fancy : bool, optional
If ``x`` doesn't support fancy indexing (e.g. indexing with lists or
arrays) then set to False. Default is True.
meta : Array-like, optional
The metadata for the resulting dask array. This is the kind of array
that will result from slicing the input array.
Defaults to the input array.
Examples
--------
>>> x = h5py.File('...')['/data/path'] # doctest: +SKIP
>>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP
If your underlying datastore does not support concurrent reads then include
the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
arrays to coordinate around the same lock.
>>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP
If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr
datasets do) then a multiple of that chunk shape will be used if you
do not provide a chunk shape.
>>> a = da.from_array(x, chunks='auto') # doctest: +SKIP
>>> a = da.from_array(x, chunks='100 MiB') # doctest: +SKIP
>>> a = da.from_array(x) # doctest: +SKIP
If providing a name, ensure that it is unique
>>> import dask.base
>>> token = dask.base.tokenize(x) # doctest: +SKIP
>>> a = da.from_array(x, name='myarray-' + token) # doctest: +SKIP
"""
if isinstance(x, Array):
raise ValueError(
"Array is already a dask array. Use 'asarray' or " "'rechunk' instead."
)
elif is_dask_collection(x):
warnings.warn(
"Passing an object to dask.array.from_array which is already a "
"Dask collection. This can lead to unexpected behavior."
)
if isinstance(x, (list, tuple, memoryview) + np.ScalarType):
x = np.array(x)
if asarray is None:
asarray = not hasattr(x, "__array_function__")
previous_chunks = getattr(x, "chunks", None)
chunks = normalize_chunks(
chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks
)
if name in (None, True):
token = tokenize(x, chunks)
original_name = "array-original-" + token
name = name or "array-" + token
elif name is False:
original_name = name = "array-" + str(uuid.uuid1())
else:
original_name = name
if lock is True:
lock = SerializableLock()
# Always use the getter for h5py etc. Not using isinstance(x, np.ndarray)
# because np.matrix is a subclass of np.ndarray.
if type(x) is np.ndarray and all(len(c) == 1 for c in chunks):
# No slicing needed
dsk = {(name,) + (0,) * x.ndim: x}
else:
if getitem is None:
if type(x) is np.ndarray and not lock:
# simpler and cleaner, but missing all the nuances of getter
getitem = operator.getitem
elif fancy:
getitem = getter
else:
getitem = getter_nofancy
dsk = getem(
original_name,
chunks,
getitem=getitem,
shape=x.shape,
out_name=name,
lock=lock,
asarray=asarray,
dtype=x.dtype,
)
dsk[original_name] = x
# Workaround for TileDB, its indexing is 1-based,
# and doesn't seem to support 0-length slicing
if x.__class__.__module__.split(".")[0] == "tiledb" and hasattr(x, "_ctx_"):
return Array(dsk, name, chunks, dtype=x.dtype)
if meta is None:
meta = x
return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, "dtype", None))
def from_zarr(
url, component=None, storage_options=None, chunks=None, name=None, **kwargs
):
"""Load array from the zarr storage format
See https://zarr.readthedocs.io for details about the format.
Parameters
----------
url: Zarr Array or str or MutableMapping
Location of the data. A URL can include a protocol specifier like s3://
for remote data. Can also be any MutableMapping instance, which should
be serializable if used in multiple processes.
component: str or None
If the location is a zarr group rather than an array, this is the
subcomponent that should be loaded, something like ``'foo/bar'``.
storage_options: dict
Any additional parameters for the storage backend (ignored for local
paths)
chunks: tuple of ints or tuples of ints
Passed to ``da.from_array``, allows setting the chunks on
initialisation, if the chunking scheme in the on-disc dataset is not
optimal for the calculations to follow.
name : str, optional
An optional keyname for the array. Defaults to hashing the input
kwargs: passed to ``zarr.Array``.
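Examples
--------
Illustrative sketches (the paths and component names are placeholders):
>>> import dask.array as da
>>> x = da.from_zarr('/path/to/data.zarr')  # doctest: +SKIP
>>> x = da.from_zarr('s3://bucket/data.zarr', component='group/array')  # doctest: +SKIP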
"""
import zarr
storage_options = storage_options or {}
if isinstance(url, zarr.Array):
z = url
elif isinstance(url, str):
from ..bytes.core import get_mapper
mapper = get_mapper(url, **storage_options)
z = zarr.Array(mapper, read_only=True, path=component, **kwargs)
else:
mapper = url
z = zarr.Array(mapper, read_only=True, path=component, **kwargs)
chunks = chunks if chunks is not None else z.chunks
if name is None:
name = "from-zarr-" + tokenize(z, component, storage_options, chunks, **kwargs)
return from_array(z, chunks, name=name)
def to_zarr(
arr,
url,
component=None,
storage_options=None,
overwrite=False,
compute=True,
return_stored=False,
**kwargs,
):
"""Save array to the zarr storage format
See https://zarr.readthedocs.io for details about the format.
Parameters
----------
arr: dask.array
Data to store
url: Zarr Array or str or MutableMapping
Location of the data. A URL can include a protocol specifier like s3://
for remote data. Can also be any MutableMapping instance, which should
be serializable if used in multiple processes.
component: str or None
If the location is a zarr group rather than an array, this is the
subcomponent that should be created/over-written.
storage_options: dict
Any additional parameters for the storage backend (ignored for local
paths)
overwrite: bool
If given array already exists, overwrite=False will cause an error,
where overwrite=True will replace the existing data. Note that this
check is done at computation time, not during graph creation.
compute, return_stored: see ``store()``
kwargs: passed to the ``zarr.create()`` function, e.g., compression options
Raises
------
ValueError
If ``arr`` has unknown chunk sizes, which is not supported by Zarr.
See Also
--------
dask.array.Array.compute_chunk_sizes
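Examples
--------
A minimal sketch (the output path is a placeholder):
>>> import dask.array as da
>>> x = da.ones((1000, 1000), chunks=(100, 100))
>>> da.to_zarr(x, '/path/to/output.zarr')  # doctest: +SKIP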
"""
import zarr
if np.isnan(arr.shape).any():
raise ValueError(
"Saving a dask array with unknown chunk sizes is not "
"currently supported by Zarr.%s" % unknown_chunk_message
)
if isinstance(url, zarr.Array):
z = url
if isinstance(z.store, (dict, zarr.DictStore)) and "distributed" in config.get(
"scheduler", ""
):
raise RuntimeError(
"Cannot store into in memory Zarr Array using "
"the Distributed Scheduler."
)
arr = arr.rechunk(z.chunks)
return arr.store(z, lock=False, compute=compute, return_stored=return_stored)
if not _check_regular_chunks(arr.chunks):
raise ValueError(
"Attempt to save array to zarr with irregular "
"chunking, please call `arr.rechunk(...)` first."
)
storage_options = storage_options or {}
if isinstance(url, str):
from ..bytes.core import get_mapper
mapper = get_mapper(url, **storage_options)
else:
# assume the object passed is already a mapper
mapper = url
chunks = [c[0] for c in arr.chunks]
# The zarr.create function has the side-effect of immediately
# creating metadata on disk. This may not be desired,
# particularly if compute=False. The caller may be creating many
# arrays on a slow filesystem, with the desire that any I/O be
# sharded across workers (not done serially on the originating
# machine). Or the caller may decide later to not to do this
# computation, and so nothing should be written to disk.
z = delayed(zarr.create)(
shape=arr.shape,
chunks=chunks,
dtype=arr.dtype,
store=mapper,
path=component,
overwrite=overwrite,
**kwargs,
)
return arr.store(z, lock=False, compute=compute, return_stored=return_stored)
def _check_regular_chunks(chunkset):
"""Check if the chunks are regular
"Regular" in this context means that along every axis, the chunks all
have the same size, except the last one, which may be smaller
Parameters
----------
chunkset: tuple of tuples of ints
From the ``.chunks`` attribute of an ``Array``
Returns
-------
True if chunkset passes, else False
Examples
--------
>>> import dask.array as da
>>> arr = da.zeros(10, chunks=(5, ))
>>> _check_regular_chunks(arr.chunks)
True
>>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))
>>> _check_regular_chunks(arr.chunks)
True
>>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))
>>> _check_regular_chunks(arr.chunks)
False
"""
for chunks in chunkset:
if len(chunks) == 1:
continue
if len(set(chunks[:-1])) > 1:
return False
if chunks[-1] > chunks[0]:
return False
return True
def from_delayed(value, shape, dtype=None, meta=None, name=None):
""" Create a dask array from a dask delayed value
This routine is useful for constructing dask arrays in an ad-hoc fashion
using dask delayed, particularly when combined with stack and concatenate.
The dask array will consist of a single chunk.
Examples
--------
>>> import dask
>>> import dask.array as da
>>> value = dask.delayed(np.ones)(5)
>>> array = da.from_delayed(value, (5,), dtype=float)
>>> array
dask.array<from-value, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
>>> array.compute()
array([1., 1., 1., 1., 1.])
"""
from ..delayed import delayed, Delayed
if not isinstance(value, Delayed) and hasattr(value, "key"):
value = delayed(value)
name = name or "from-value-" + tokenize(value, shape, dtype, meta)
dsk = {(name,) + (0,) * len(shape): value.key}
chunks = tuple((d,) for d in shape)
# TODO: value._key may not be the name of the layer in value.dask
# This should be fixed after we build full expression graphs
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])
return Array(graph, name, chunks, dtype=dtype, meta=meta)
def from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):
""" Create dask array in a single block by calling a function
Calling the provided function with func(*args, **kwargs) should return a
NumPy array of the indicated shape and dtype.
Examples
--------
>>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))
>>> a.compute()
array([0, 1, 2])
This works particularly well when coupled with dask.array functions like
concatenate and stack:
>>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]
>>> stack(arrays).compute()
array([0, 1, 2, 3, 4])
"""
name = name or "from_func-" + tokenize(func, shape, dtype, args, kwargs)
if args or kwargs:
func = partial(func, *args, **kwargs)
dsk = {(name,) + (0,) * len(shape): (func,)}
chunks = tuple((i,) for i in shape)
return Array(dsk, name, chunks, dtype)
def common_blockdim(blockdims):
""" Find the common block dimensions from the list of block dimensions
Currently only implements the simplest possible heuristic: the common
block-dimension is the only one that does not fully span a dimension.
This is a conservative choice that allows us to avoid potentially very
expensive rechunking.
Assumes that the elements of the input block dimensions all have the same
sum (i.e., that they correspond to dimensions of the same size).
Examples
--------
>>> common_blockdim([(3,), (2, 1)])
(2, 1)
>>> common_blockdim([(1, 2), (2, 1)])
(1, 1, 1)
>>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP
Traceback (most recent call last):
...
ValueError: Chunks do not align
"""
if not any(blockdims):
return ()
non_trivial_dims = set([d for d in blockdims if len(d) > 1])
if len(non_trivial_dims) == 1:
return first(non_trivial_dims)
if len(non_trivial_dims) == 0:
return max(blockdims, key=first)
if np.isnan(sum(map(sum, blockdims))):
raise ValueError(
"Arrays chunk sizes (%s) are unknown.\n\n"
"A possible solution:\n"
" x.compute_chunk_sizes()" % blockdims
)
if len(set(map(sum, non_trivial_dims))) > 1:
raise ValueError("Chunks do not add up to same value", blockdims)
# We have multiple non-trivial chunks on this axis
# e.g. (5, 2) and (4, 3)
# We create a single chunk tuple with the same total length
# that evenly divides both, e.g. (4, 1, 2)
# To accomplish this we walk down all chunk tuples together, finding the
# smallest element, adding it to the output, and subtracting it from all
# other elements and remove the element itself. We stop once we have
# burned through all of the chunk tuples.
# For efficiency's sake we reverse the lists so that we can pop off the end
rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]
total = sum(first(non_trivial_dims))
i = 0
out = []
while i < total:
m = min(c[-1] for c in rchunks)
out.append(m)
for c in rchunks:
c[-1] -= m
if c[-1] == 0:
c.pop()
i += m
return tuple(out)
def unify_chunks(*args, **kwargs):
"""
Unify chunks across a sequence of arrays
This utility function is used within other common operations like
``map_blocks`` and ``blockwise``. It is not commonly used by end-users
directly.
Parameters
----------
*args: sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
Examples
--------
>>> import dask.array as da
>>> x = da.ones(10, chunks=((5, 2, 3),))
>>> y = da.ones(10, chunks=((2, 3, 5),))
>>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')
>>> chunkss
{'i': (2, 3, 2, 3)}
>>> x = da.ones((100, 10), chunks=(20, 5))
>>> y = da.ones((10, 100), chunks=(4, 50))
>>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)
>>> chunkss # doctest: +SKIP
{'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}
>>> unify_chunks(0, None)
({}, [0])
Returns
-------
chunkss : dict
Map like {index: chunks}.
arrays : list
List of rechunked arrays.
See Also
--------
common_blockdim
"""
if not args:
return {}, []
arginds = [
(asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)
] # [x, ij, y, jk]
args = list(concat(arginds)) # [(x, ij), (y, jk)]
warn = kwargs.get("warn", True)
arrays, inds = zip(*arginds)
if all(ind is None for ind in inds):
return {}, list(arrays)
if all(ind == inds[0] for ind in inds) and all(
a.chunks == arrays[0].chunks for a in arrays
):
return dict(zip(inds[0], arrays[0].chunks)), arrays
nameinds = []
blockdim_dict = dict()
max_parts = 0
for a, ind in arginds:
if ind is not None:
nameinds.append((a.name, ind))
blockdim_dict[a.name] = a.chunks
max_parts = max(max_parts, a.npartitions)
else:
nameinds.append((a, ind))
chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)
nparts = np.prod(list(map(len, chunkss.values())))
if warn and nparts and nparts >= max_parts * 10:
warnings.warn(
"Increasing number of chunks by factor of %d" % (nparts / max_parts),
PerformanceWarning,
stacklevel=3,
)
arrays = []
for a, i in arginds:
if i is None:
arrays.append(a)
else:
chunks = tuple(
chunkss[j]
if a.shape[n] > 1
else a.shape[n]
if not np.isnan(sum(chunkss[j]))
else None
for n, j in enumerate(i)
)
if chunks != a.chunks and all(a.chunks):
arrays.append(a.rechunk(chunks))
else:
arrays.append(a)
return chunkss, arrays
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array('2000-01-01', dtype='datetime64[D]')
"""
while isinstance(x, (list, tuple)):
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x
def block(arrays, allow_unknown_chunksizes=False):
"""
Assemble an nd-array from nested lists of blocks.
Blocks in the innermost lists are concatenated along the last
dimension (-1), then these are concatenated along the second-last
dimension (-2), and so on until the outermost list is reached
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
the same for all blocks. This is primarily useful for working with scalars,
and means that code like ``block([v, 1])`` is valid, where
``v.ndim == 1``.
When the nested list is two levels deep, this allows block matrices to be
constructed from their components.
Parameters
----------
arrays : nested list of array_like or scalars (but not tuples)
If passed a single ndarray or scalar (a nested list of depth 0), this
is returned unmodified (and not copied).
Elements shapes must match along the appropriate axes (without
broadcasting), but leading 1s will be prepended to the shape as
necessary to make the dimensions match.
allow_unknown_chunksizes: bool
Allow unknown chunksizes, such as come from converting from dask
dataframes. Dask.array is unable to verify that chunks line up. If
data comes from differently aligned sources then this can cause
unexpected results.
Returns
-------
block_array : ndarray
The array assembled from the given blocks.
The dimensionality of the output is equal to the greatest of:
* the dimensionality of all the inputs
* the depth to which the input list is nested
Raises
------
ValueError
* If list depths are mismatched - for instance, ``[[a, b], c]`` is
illegal, and should be spelt ``[[a, b], [c]]``
* If lists are empty - for instance, ``[[a, b], []]``
See Also
--------
concatenate : Join a sequence of arrays together.
stack : Stack arrays in sequence along a new dimension.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
vsplit : Split array into a list of multiple sub-arrays vertically.
Notes
-----
When called with only scalars, ``block`` is equivalent to an ndarray
call. So ``block([[1, 2], [3, 4]])`` is equivalent to
``array([[1, 2], [3, 4]])``.
This function does not enforce that the blocks lie on a fixed grid.
``block([[a, b], [c, d]])`` is not restricted to arrays of the form::
AAAbb
AAAbb
cccDD
But is also allowed to produce, for some ``a, b, c, d``::
AAAbb
AAAbb
cDDDD
Since concatenation happens along the last axis first, `block` is _not_
capable of producing the following directly::
AAAbb
cccbb
cccDD
Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
equivalent to ``block([[A, B, ...], [p, q, ...]])``.
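Examples
--------
An illustrative sketch assembling a 2x2 block matrix from equal-sized blocks:
>>> import dask.array as da
>>> A = da.ones((2, 2), chunks=2)
>>> z = da.zeros((2, 2), chunks=2)
>>> da.block([[A, z], [z, A]]).shape
(4, 4)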
"""
# This was copied almost verbatim from numpy.core.shape_base.block
# See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
def atleast_nd(x, ndim):
x = asanyarray(x)
diff = max(ndim - x.ndim, 0)
if diff == 0:
return x
else:
return x[(None,) * diff + (Ellipsis,)]
def format_index(index):
return "arrays" + "".join("[{}]".format(i) for i in index)
rec = _Recurser(recurse_if=lambda x: type(x) is list)
# ensure that the lists are all matched in depth
list_ndim = None
any_empty = False
for index, value, entering in rec.walk(arrays):
if type(value) is tuple:
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
# lists
# - horribly confusing behaviour that results when tuples are
# treated like ndarray
raise TypeError(
"{} is a tuple. "
"Only lists can be used to arrange blocks, and np.block does "
"not allow implicit conversion from tuple to ndarray.".format(
format_index(index)
)
)
if not entering:
curr_depth = len(index)
elif len(value) == 0:
curr_depth = len(index) + 1
any_empty = True
else:
continue
if list_ndim is not None and list_ndim != curr_depth:
raise ValueError(
"List depths are mismatched. First element was at depth {}, "
"but there is an element at depth {} ({})".format(
list_ndim, curr_depth, format_index(index)
)
)
list_ndim = curr_depth
# do this here so we catch depth mismatches first
if any_empty:
raise ValueError("Lists cannot be empty")
# convert all the arrays to ndarrays
arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)
# determine the maximum dimension of the elements
elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)
ndim = max(list_ndim, elem_ndim)
# first axis to concatenate along
first_axis = ndim - list_ndim
# Make all the elements the same dimension
arrays = rec.map_reduce(
arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list
)
# concatenate innermost lists on the right, outermost on the left
return rec.map_reduce(
arrays,
f_reduce=lambda xs, axis: concatenate(
list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes
),
f_kwargs=lambda axis: dict(axis=(axis + 1)),
axis=first_axis,
)
def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
"""
Concatenate arrays along an existing axis
Given a sequence of dask Arrays form a new dask Array by stacking them
along an existing dimension (axis=0 by default)
Parameters
----------
seq: list of dask.arrays
axis: int
Dimension along which to align all of the arrays
allow_unknown_chunksizes: bool
Allow unknown chunksizes, such as come from converting from dask
dataframes. Dask.array is unable to verify that chunks line up. If
data comes from differently aligned sources then this can cause
unexpected results.
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.concatenate(data, axis=0)
>>> x.shape
(12, 4)
>>> da.concatenate(data, axis=1).shape
(4, 12)
Result is a new dask Array
See Also
--------
stack
"""
from . import wrap
seq = [asarray(a) for a in seq]
if not seq:
raise ValueError("Need array(s) to concatenate")
meta = np.concatenate([meta_from_array(s) for s in seq], axis=axis)
# Promote types to match meta
seq = [a.astype(meta.dtype) for a in seq]
# Find output array shape
ndim = len(seq[0].shape)
shape = tuple(
sum((a.shape[i] for a in seq)) if i == axis else seq[0].shape[i]
for i in range(ndim)
)
# Drop empty arrays
seq2 = [a for a in seq if a.size]
if not seq2:
seq2 = seq
if axis < 0:
axis = ndim + axis
if axis >= ndim:
msg = (
"Axis must be less than than number of dimensions"
"\nData has %d dimensions, but got axis=%d"
)
raise ValueError(msg % (ndim, axis))
n = len(seq2)
if n == 0:
try:
return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
except TypeError:
return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
elif n == 1:
return seq2[0]
if not allow_unknown_chunksizes and not all(
i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)
for i in range(ndim)
):
if any(map(np.isnan, seq2[0].shape)):
raise ValueError(
"Tried to concatenate arrays with unknown"
" shape %s.\n\nTwo solutions:\n"
" 1. Force concatenation pass"
" allow_unknown_chunksizes=True.\n"
" 2. Compute shapes with "
"[x.compute_chunk_sizes() for x in seq]" % str(seq2[0].shape)
)
        raise ValueError("Shapes do not align: %s" % [x.shape for x in seq2])
inds = [list(range(ndim)) for i in range(n)]
for i, ind in enumerate(inds):
ind[axis] = -(i + 1)
uc_args = list(concat(zip(seq2, inds)))
_, seq2 = unify_chunks(*uc_args, warn=False)
bds = [a.chunks for a in seq2]
chunks = (
seq2[0].chunks[:axis]
+ (sum([bd[axis] for bd in bds], ()),)
+ seq2[0].chunks[axis + 1 :]
)
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))
names = [a.name for a in seq2]
name = "concatenate-" + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
values = [
(names[bisect(cum_dims, key[axis + 1]) - 1],)
+ key[1 : axis + 1]
+ (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)
+ key[axis + 2 :]
for key in keys
]
dsk = dict(zip(keys, values))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)
return Array(graph, name, chunks, meta=meta)
def load_store_chunk(x, out, index, lock, return_stored, load_stored):
"""
A function inserted in a Dask graph for storing a chunk.
Parameters
----------
x: array-like
An array (potentially a NumPy one)
out: array-like
        Where to store the results.
index: slice-like
Where to store result from ``x`` in ``out``.
lock: Lock-like or False
Lock to use before writing to ``out``.
return_stored: bool
Whether to return ``out``.
load_stored: bool
Whether to return the array stored in ``out``.
Ignored if ``return_stored`` is not ``True``.
Examples
--------
>>> a = np.ones((5, 6))
>>> b = np.empty(a.shape)
>>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)
"""
result = None
if return_stored and not load_stored:
result = out
if lock:
lock.acquire()
try:
if x is not None:
out[index] = np.asanyarray(x)
if return_stored and load_stored:
result = out[index]
finally:
if lock:
lock.release()
return result
def store_chunk(x, out, index, lock, return_stored):
return load_store_chunk(x, out, index, lock, return_stored, False)
def load_chunk(out, index, lock):
return load_store_chunk(None, out, index, lock, True, True)
def insert_to_ooc(
arr, out, lock=True, region=None, return_stored=False, load_stored=False, tok=None
):
"""
Creates a Dask graph for storing chunks from ``arr`` in ``out``.
Parameters
----------
arr: da.Array
A dask array
out: array-like
        Where to store the results.
lock: Lock-like or bool, optional
Whether to lock or with what (default is ``True``,
which means a ``threading.Lock`` instance).
region: slice-like, optional
Where in ``out`` to store ``arr``'s results
(default is ``None``, meaning all of ``out``).
return_stored: bool, optional
Whether to return ``out``
(default is ``False``, meaning ``None`` is returned).
load_stored: bool, optional
        Whether to handle loading from ``out`` at the same time.
Ignored if ``return_stored`` is not ``True``.
(default is ``False``, meaning defer to ``return_stored``).
tok: str, optional
Token to use when naming keys
Examples
--------
>>> import dask.array as da
>>> d = da.ones((5, 6), chunks=(2, 3))
>>> a = np.empty(d.shape)
>>> insert_to_ooc(d, a) # doctest: +SKIP
"""
if lock is True:
lock = Lock()
slices = slices_from_chunks(arr.chunks)
if region:
slices = [fuse_slice(region, slc) for slc in slices]
name = "store-%s" % (tok or str(uuid.uuid1()))
func = store_chunk
args = ()
if return_stored and load_stored:
name = "load-%s" % name
func = load_store_chunk
args = args + (load_stored,)
dsk = {
(name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args
for t, slc in zip(core.flatten(arr.__dask_keys__()), slices)
}
return dsk
def retrieve_from_ooc(keys, dsk_pre, dsk_post=None):
"""
Creates a Dask graph for loading stored ``keys`` from ``dsk``.
Parameters
----------
keys: Sequence
A sequence containing Dask graph keys to load
dsk_pre: Mapping
A Dask graph corresponding to a Dask Array before computation
dsk_post: Mapping, optional
A Dask graph corresponding to a Dask Array after computation
Examples
--------
>>> import dask.array as da
>>> d = da.ones((5, 6), chunks=(2, 3))
>>> a = np.empty(d.shape)
>>> g = insert_to_ooc(d, a)
>>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP
"""
if not dsk_post:
dsk_post = {k: k for k in keys}
load_dsk = {
("load-" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]
for k in keys
}
return load_dsk
def asarray(a, **kwargs):
"""Convert the input to a dask array.
Parameters
----------
a : array-like
Input data, in any form that can be converted to a dask array.
Returns
-------
out : dask array
Dask array interpretation of a.
Examples
--------
>>> import dask.array as da
>>> import numpy as np
>>> x = np.arange(3)
>>> da.asarray(x)
dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
>>> y = [[1, 2, 3], [4, 5, 6]]
>>> da.asarray(y)
dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>
"""
if isinstance(a, Array):
return a
elif hasattr(a, "to_dask_array"):
return a.to_dask_array()
elif type(a).__module__.startswith("xarray.") and hasattr(a, "data"):
return asarray(a.data)
elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):
return stack(a)
elif not isinstance(getattr(a, "shape", None), Iterable):
a = np.asarray(a)
return from_array(a, getitem=getter_inline, **kwargs)
def asanyarray(a):
"""Convert the input to a dask array.
Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.
Parameters
----------
a : array-like
Input data, in any form that can be converted to a dask array.
Returns
-------
out : dask array
Dask array interpretation of a.
Examples
--------
>>> import dask.array as da
>>> import numpy as np
>>> x = np.arange(3)
>>> da.asanyarray(x)
dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
>>> y = [[1, 2, 3], [4, 5, 6]]
>>> da.asanyarray(y)
dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>
"""
if isinstance(a, Array):
return a
elif hasattr(a, "to_dask_array"):
return a.to_dask_array()
elif type(a).__module__.startswith("xarray.") and hasattr(a, "data"):
return asanyarray(a.data)
elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):
a = stack(a)
elif not isinstance(getattr(a, "shape", None), Iterable):
a = np.asanyarray(a)
return from_array(a, chunks=a.shape, getitem=getter_inline, asarray=False)
def is_scalar_for_elemwise(arg):
"""
>>> is_scalar_for_elemwise(42)
True
>>> is_scalar_for_elemwise('foo')
True
>>> is_scalar_for_elemwise(True)
True
>>> is_scalar_for_elemwise(np.array(42))
True
>>> is_scalar_for_elemwise([1, 2, 3])
True
>>> is_scalar_for_elemwise(np.array([1, 2, 3]))
False
>>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))
False
>>> is_scalar_for_elemwise(np.dtype('i4'))
True
"""
# the second half of shape_condition is essentially just to ensure that
# dask series / frame are treated as scalars in elemwise.
maybe_shape = getattr(arg, "shape", None)
shape_condition = not isinstance(maybe_shape, Iterable) or any(
is_dask_collection(x) for x in maybe_shape
)
return (
np.isscalar(arg)
or shape_condition
or isinstance(arg, np.dtype)
or (isinstance(arg, np.ndarray) and arg.ndim == 0)
)
def broadcast_shapes(*shapes):
"""
Determines output shape from broadcasting arrays.
Parameters
----------
shapes : tuples
The shapes of the arguments.
Returns
-------
output_shape : tuple
Raises
------
ValueError
If the input shapes cannot be successfully broadcast together.
"""
if len(shapes) == 1:
return shapes[0]
out = []
for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):
if np.isnan(sizes).any():
dim = np.nan
else:
dim = 0 if 0 in sizes else np.max(sizes)
if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes):
raise ValueError(
"operands could not be broadcast together with "
"shapes {0}".format(" ".join(map(str, shapes)))
)
out.append(dim)
return tuple(reversed(out))
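# Illustrative sketch (not part of the original module): broadcast_shapes aligns
# shapes from the right, letting new dimensions and dimensions of size 1
# broadcast against any size.
#
#     >>> broadcast_shapes((2, 3), (3,))        # doctest: +SKIP
#     (2, 3)
#     >>> broadcast_shapes((5, 1, 4), (3, 1))   # doctest: +SKIP
#     (5, 3, 4)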
def elemwise(op, *args, **kwargs):
""" Apply elementwise function across arguments
Respects broadcasting rules
Examples
--------
>>> elemwise(add, x, y) # doctest: +SKIP
>>> elemwise(sin, x) # doctest: +SKIP
See Also
--------
blockwise
"""
out = kwargs.pop("out", None)
if not set(["name", "dtype"]).issuperset(kwargs):
msg = "%s does not take the following keyword arguments %s"
raise TypeError(
msg % (op.__name__, str(sorted(set(kwargs) - set(["name", "dtype"]))))
)
args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]
shapes = []
for arg in args:
shape = getattr(arg, "shape", ())
if any(is_dask_collection(x) for x in shape):
            # Want to exclude Delayed shapes and dd.Scalar
shape = ()
shapes.append(shape)
shapes = [s if isinstance(s, Iterable) else () for s in shapes]
out_ndim = len(
broadcast_shapes(*shapes)
) # Raises ValueError if dimensions mismatch
expr_inds = tuple(range(out_ndim))[::-1]
need_enforce_dtype = False
if "dtype" in kwargs:
dt = kwargs["dtype"]
else:
# We follow NumPy's rules for dtype promotion, which special cases
# scalars and 0d ndarrays (which it considers equivalent) by using
# their values to compute the result dtype:
# https://github.com/numpy/numpy/issues/6240
# We don't inspect the values of 0d dask arrays, because these could
# hold potentially very expensive calculations. Instead, we treat
# them just like other arrays, and if necessary cast the result of op
# to match.
vals = [
np.empty((1,) * max(1, a.ndim), dtype=a.dtype)
if not is_scalar_for_elemwise(a)
else a
for a in args
]
try:
dt = apply_infer_dtype(op, vals, {}, "elemwise", suggest_dtype=False)
except Exception:
return NotImplemented
need_enforce_dtype = any(
not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args
)
name = kwargs.get("name", None) or "%s-%s" % (funcname(op), tokenize(op, dt, *args))
blockwise_kwargs = dict(dtype=dt, name=name, token=funcname(op).strip("_"))
if need_enforce_dtype:
blockwise_kwargs["enforce_dtype"] = dt
blockwise_kwargs["enforce_dtype_function"] = op
op = _enforce_dtype
result = blockwise(
op,
expr_inds,
*concat(
(a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)
for a in args
),
**blockwise_kwargs,
)
return handle_out(out, result)
def handle_out(out, result):
""" Handle out parameters
If out is a dask.array then this overwrites the contents of that array with
the result
"""
if isinstance(out, tuple):
if len(out) == 1:
out = out[0]
elif len(out) > 1:
raise NotImplementedError("The out parameter is not fully supported")
else:
out = None
if isinstance(out, Array):
if out.shape != result.shape:
raise ValueError(
"Mismatched shapes between result and out parameter. "
"out=%s, result=%s" % (str(out.shape), str(result.shape))
)
out._chunks = result.chunks
out.dask = result.dask
out._meta = result._meta
out.name = result.name
elif out is not None:
msg = (
"The out parameter is not fully supported."
" Received type %s, expected Dask Array" % type(out).__name__
)
raise NotImplementedError(msg)
else:
return result
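# Hedged usage sketch (assumes dask.array is importable as ``da``; illustrative
# only): when ``out`` is a dask Array of matching shape, elemwise() routes the
# result through handle_out(), which rebinds ``out`` in place and returns None.
#
#     >>> import numpy as np                      # doctest: +SKIP
#     >>> import dask.array as da                 # doctest: +SKIP
#     >>> x = da.ones(4, chunks=2)                # doctest: +SKIP
#     >>> out = da.zeros(4, chunks=2)             # doctest: +SKIP
#     >>> elemwise(np.add, x, x, out=out)         # doctest: +SKIP
#     >>> out.compute()                           # doctest: +SKIP
#     array([2., 2., 2., 2.])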
def _enforce_dtype(*args, **kwargs):
"""Calls a function and converts its result to the given dtype.
The parameters have deliberately been given unwieldy names to avoid
clashes with keyword arguments consumed by blockwise
A dtype of `object` is treated as a special case and not enforced,
because it is used as a dummy value in some places when the result will
not be a block in an Array.
Parameters
----------
enforce_dtype : dtype
Result dtype
enforce_dtype_function : callable
The wrapped function, which will be passed the remaining arguments
"""
dtype = kwargs.pop("enforce_dtype")
function = kwargs.pop("enforce_dtype_function")
result = function(*args, **kwargs)
if hasattr(result, "dtype") and dtype != result.dtype and dtype != object:
if not np.can_cast(result, dtype, casting="same_kind"):
raise ValueError(
"Inferred dtype from function %r was %r "
"but got %r, which can't be cast using "
"casting='same_kind'"
% (funcname(function), str(dtype), str(result.dtype))
)
if np.isscalar(result):
# scalar astype method doesn't take the keyword arguments, so
# have to convert via 0-dimensional array and back.
result = result.astype(dtype)
else:
try:
result = result.astype(dtype, copy=False)
except TypeError:
# Missing copy kwarg
result = result.astype(dtype)
return result
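# Comment-only sketch of how the dtype-enforcing wrapper behaves (the keyword
# arguments mirror what elemwise() injects above; the values are hypothetical):
#
#     >>> _enforce_dtype(np.float32(1), np.float32(2),            # doctest: +SKIP
#     ...                enforce_dtype=np.dtype('f8'),
#     ...                enforce_dtype_function=np.add)
#     3.0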
def broadcast_to(x, shape, chunks=None):
"""Broadcast an array to a new shape.
Parameters
----------
x : array_like
The array to broadcast.
shape : tuple
The shape of the desired array.
chunks : tuple, optional
If provided, then the result will use these chunks instead of the same
chunks as the source array. Setting chunks explicitly as part of
broadcast_to is more efficient than rechunking afterwards. Chunks are
only allowed to differ from the original shape along dimensions that
        are new on the result or have size 1 in the input array.
Returns
-------
broadcast : dask array
See Also
--------
:func:`numpy.broadcast_to`
"""
x = asarray(x)
shape = tuple(shape)
if x.shape == shape and (chunks is None or chunks == x.chunks):
return x
ndim_new = len(shape) - x.ndim
if ndim_new < 0 or any(
new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1
):
raise ValueError("cannot broadcast shape %s to shape %s" % (x.shape, shape))
if chunks is None:
chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(
bd if old > 1 else (new,)
for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])
)
else:
chunks = normalize_chunks(
chunks, shape, dtype=x.dtype, previous_chunks=x.chunks
)
for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):
if old_bd != new_bd and old_bd != (1,):
raise ValueError(
"cannot broadcast chunks %s to chunks %s: "
"new chunks must either be along a new "
"dimension or a dimension of size 1" % (x.chunks, chunks)
)
name = "broadcast_to-" + tokenize(x, shape, chunks)
dsk = {}
enumerated_chunks = product(*(enumerate(bds) for bds in chunks))
for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):
old_index = tuple(
0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])
)
old_key = (x.name,) + old_index
new_key = (name,) + new_index
dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
return Array(graph, name, chunks, dtype=x.dtype)
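# Minimal sketch (illustrative, assumes dask.array is importable as ``da``):
# broadcasting a length-3 vector to shape (4, 3) while choosing the chunking of
# the new leading axis up front, instead of rechunking afterwards.
#
#     >>> import dask.array as da                                  # doctest: +SKIP
#     >>> x = da.ones(3, chunks=3)                                 # doctest: +SKIP
#     >>> y = da.broadcast_to(x, (4, 3), chunks=((2, 2), (3,)))    # doctest: +SKIP
#     >>> y.chunks                                                 # doctest: +SKIP
#     ((2, 2), (3,))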
@derived_from(np)
def broadcast_arrays(*args, **kwargs):
subok = bool(kwargs.pop("subok", False))
to_array = asanyarray if subok else asarray
args = tuple(to_array(e) for e in args)
if kwargs:
raise TypeError("unsupported keyword argument(s) provided")
# Unify uneven chunking
inds = [list(reversed(range(x.ndim))) for x in args]
uc_args = concat(zip(args, inds))
_, args = unify_chunks(*uc_args, warn=False)
shape = broadcast_shapes(*(e.shape for e in args))
chunks = broadcast_chunks(*(e.chunks for e in args))
result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]
return result
def offset_func(func, offset, *args):
""" Offsets inputs by offset
>>> double = lambda x: x * 2
>>> f = offset_func(double, (10,))
>>> f(1)
22
>>> f(300)
620
"""
def _offset(*args):
args2 = list(map(add, args, offset))
return func(*args2)
with ignoring(Exception):
_offset.__name__ = "offset_" + func.__name__
return _offset
def chunks_from_arrays(arrays):
""" Chunks tuple from nested list of arrays
>>> x = np.array([1, 2])
>>> chunks_from_arrays([x, x])
((2, 2),)
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x], [x]])
((1, 1), (2,))
>>> x = np.array([[1, 2]])
>>> chunks_from_arrays([[x, x]])
((1,), (2, 2))
>>> chunks_from_arrays([1, 1])
((1, 1),)
"""
if not arrays:
return ()
result = []
dim = 0
def shape(x):
try:
return x.shape
except AttributeError:
return (1,)
while isinstance(arrays, (list, tuple)):
result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))
arrays = arrays[0]
dim += 1
return tuple(result)
def deepfirst(seq):
""" First element in a nested list
>>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
1
"""
if not isinstance(seq, (list, tuple)):
return seq
else:
return deepfirst(seq[0])
def shapelist(a):
""" Get the shape of nested list """
if type(a) is list:
return tuple([len(a)] + list(shapelist(a[0])))
else:
return ()
def reshapelist(shape, seq):
""" Reshape iterator to nested shape
>>> reshapelist((2, 3), range(6))
[[0, 1, 2], [3, 4, 5]]
"""
if len(shape) == 1:
return list(seq)
else:
n = int(len(seq) / shape[0])
return [reshapelist(shape[1:], part) for part in partition(n, seq)]
def transposelist(arrays, axes, extradims=0):
""" Permute axes of nested list
>>> transposelist([[1,1,1],[1,1,1]], [2,1])
[[[1, 1], [1, 1], [1, 1]]]
>>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)
[[[[1], [1]], [[1], [1]], [[1], [1]]]]
"""
if len(axes) != ndimlist(arrays):
raise ValueError("Length of axes should equal depth of nested arrays")
if extradims < 0:
        raise ValueError("`extradims` should be positive")
if len(axes) > len(set(axes)):
raise ValueError("`axes` should be unique")
ndim = max(axes) + 1
shape = shapelist(arrays)
newshape = [
shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)
]
result = list(core.flatten(arrays))
return reshapelist(newshape, result)
def stack(seq, axis=0, allow_unknown_chunksizes=False):
"""
Stack arrays along a new axis
Given a sequence of dask arrays, form a new dask array by stacking them
along a new dimension (axis=0 by default)
Parameters
----------
seq: list of dask.arrays
axis: int
Dimension along which to align all of the arrays
allow_unknown_chunksizes: bool
Allow unknown chunksizes, such as come from converting from dask
dataframes. Dask.array is unable to verify that chunks line up. If
data comes from differently aligned sources then this can cause
unexpected results.
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
from . import wrap
seq = [asarray(a) for a in seq]
if not seq:
raise ValueError("Need array(s) to stack")
if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):
idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)
raise ValueError(
"Stacked arrays must have the same shape. "
"The first array had shape {0}, while array "
"{1} has shape {2}.".format(seq[0].shape, idx[0] + 1, idx[1].shape)
)
meta = np.stack([meta_from_array(a) for a in seq], axis=axis)
seq = [x.astype(meta.dtype) for x in seq]
ndim = meta.ndim - 1
if axis < 0:
axis = ndim + axis + 1
shape = tuple(
len(seq)
if i == axis
else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])
for i in range(meta.ndim)
)
seq2 = [a for a in seq if a.size]
if not seq2:
seq2 = seq
n = len(seq2)
if n == 0:
try:
return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
except TypeError:
return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
ind = list(range(ndim))
uc_args = list(concat((x, ind) for x in seq2))
_, seq2 = unify_chunks(*uc_args)
assert len(set(a.chunks for a in seq2)) == 1 # same chunks
chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]
names = [a.name for a in seq2]
name = "stack-" + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
inputs = [
(names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys
]
values = [
(
getitem,
inp,
(slice(None, None, None),) * axis
+ (None,)
+ (slice(None, None, None),) * (ndim - axis),
)
for inp in inputs
]
layer = dict(zip(keys, values))
graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)
return Array(graph, name, chunks, meta=meta)
def concatenate3(arrays):
""" Recursive np.concatenate
Input should be a nested list of numpy arrays arranged in the order they
should appear in the array itself. Each array should have the same number
of dimensions as the desired output and the nesting of the lists.
>>> x = np.array([[1, 2]])
>>> concatenate3([[x, x, x], [x, x, x]])
array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])
>>> concatenate3([[x, x], [x, x], [x, x]])
array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])
"""
from .utils import IS_NEP18_ACTIVE
# We need this as __array_function__ may not exist on older NumPy versions.
# And to reduce verbosity.
NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, "__array_function__", None)
arrays = concrete(arrays)
if not arrays:
return np.empty(0)
advanced = max(
core.flatten(arrays, container=(list, tuple)),
key=lambda x: getattr(x, "__array_priority__", 0),
)
if IS_NEP18_ACTIVE and not all(
NDARRAY_ARRAY_FUNCTION
is getattr(arr, "__array_function__", NDARRAY_ARRAY_FUNCTION)
for arr in arrays
):
try:
x = unpack_singleton(arrays)
return _concatenate2(arrays, axes=tuple(range(x.ndim)))
except TypeError:
pass
if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:
x = unpack_singleton(arrays)
return _concatenate2(arrays, axes=list(range(x.ndim)))
ndim = ndimlist(arrays)
if not ndim:
return arrays
chunks = chunks_from_arrays(arrays)
shape = tuple(map(sum, chunks))
def dtype(x):
try:
return x.dtype
except AttributeError:
return type(x)
result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))
for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):
if hasattr(arr, "ndim"):
while arr.ndim < ndim:
arr = arr[None, ...]
result[idx] = arr
return result
def concatenate_axes(arrays, axes):
""" Recursively call np.concatenate along axes """
if len(axes) != ndimlist(arrays):
raise ValueError("Length of axes should equal depth of nested arrays")
extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))
return concatenate3(transposelist(arrays, axes, extradims=extradims))
def to_hdf5(filename, *args, **kwargs):
""" Store arrays in HDF5 file
This saves several dask arrays into several datapaths in an HDF5 file.
It creates the necessary datasets and handles clean file opening/closing.
>>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP
or
>>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP
Optionally provide arguments as though to ``h5py.File.create_dataset``
>>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP
This can also be used as a method on a single Array
>>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP
See Also
--------
da.store
h5py.File.create_dataset
"""
if len(args) == 1 and isinstance(args[0], dict):
data = args[0]
elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):
data = {args[0]: args[1]}
else:
raise ValueError("Please provide {'/data/path': array} dictionary")
chunks = kwargs.pop("chunks", True)
import h5py
with h5py.File(filename, mode="a") as f:
dsets = [
f.require_dataset(
dp,
shape=x.shape,
dtype=x.dtype,
chunks=tuple([c[0] for c in x.chunks]) if chunks is True else chunks,
**kwargs,
)
for dp, x in data.items()
]
store(list(data.values()), dsets)
def interleave_none(a, b):
"""
>>> interleave_none([0, None, 2, None], [1, 3])
(0, 1, 2, 3)
"""
result = []
i = j = 0
n = len(a) + len(b)
while i + j < n:
if a[i] is not None:
result.append(a[i])
i += 1
else:
result.append(b[j])
i += 1
j += 1
return tuple(result)
def keyname(name, i, okey):
"""
>>> keyname('x', 3, [None, None, 0, 2])
('x', 3, 0, 2)
"""
return (name, i) + tuple(k for k in okey if k is not None)
def _vindex(x, *indexes):
"""Point wise indexing with broadcasting.
>>> x = np.arange(56).reshape((7, 8))
>>> x
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55]])
>>> d = from_array(x, chunks=(3, 4))
>>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])
>>> result.compute()
array([ 0, 9, 48, 7])
"""
indexes = replace_ellipsis(x.ndim, indexes)
nonfancy_indexes = []
reduced_indexes = []
for i, ind in enumerate(indexes):
if isinstance(ind, Number):
nonfancy_indexes.append(ind)
elif isinstance(ind, slice):
nonfancy_indexes.append(ind)
reduced_indexes.append(slice(None))
else:
nonfancy_indexes.append(slice(None))
reduced_indexes.append(ind)
nonfancy_indexes = tuple(nonfancy_indexes)
reduced_indexes = tuple(reduced_indexes)
x = x[nonfancy_indexes]
array_indexes = {}
for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):
if not isinstance(ind, slice):
ind = np.array(ind, copy=True)
if ind.dtype.kind == "b":
raise IndexError("vindex does not support indexing with boolean arrays")
if ((ind >= size) | (ind < -size)).any():
raise IndexError(
"vindex key has entries out of bounds for "
"indexing along axis %s of size %s: %r" % (i, size, ind)
)
ind %= size
array_indexes[i] = ind
if array_indexes:
x = _vindex_array(x, array_indexes)
return x
def _vindex_array(x, dict_indexes):
"""Point wise indexing with only NumPy Arrays."""
try:
broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())
except ValueError as e:
# note: error message exactly matches numpy
shapes_str = " ".join(str(a.shape) for a in dict_indexes.values())
raise IndexError(
"shape mismatch: indexing arrays could not be "
"broadcast together with shapes " + shapes_str
) from e
broadcast_shape = broadcast_indexes[0].shape
lookup = dict(zip(dict_indexes, broadcast_indexes))
flat_indexes = [
lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)
]
flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))
flat_indexes = [
list(index) if index is not None else index for index in flat_indexes
]
bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]
bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]
axis = _get_axis(flat_indexes)
token = tokenize(x, flat_indexes)
out_name = "vindex-merge-" + token
points = list()
for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):
block_idx = [
np.searchsorted(b, ind, "right") - 1 for b, ind in zip(bounds2, idx)
]
inblock_idx = [
ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))
]
points.append((i, tuple(block_idx), tuple(inblock_idx)))
chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]
chunks.insert(0, (len(points),) if points else (0,))
chunks = tuple(chunks)
if points:
per_block = groupby(1, points)
per_block = dict((k, v) for k, v in per_block.items() if v)
other_blocks = list(
product(
*[
list(range(len(c))) if i is None else [None]
for i, c in zip(flat_indexes, x.chunks)
]
)
)
full_slices = [slice(None, None) if i is None else None for i in flat_indexes]
name = "vindex-slice-" + token
vindex_merge_name = "vindex-merge-" + token
dsk = {}
for okey in other_blocks:
for i, key in enumerate(per_block):
dsk[keyname(name, i, okey)] = (
_vindex_transpose,
(
_vindex_slice,
(x.name,) + interleave_none(okey, key),
interleave_none(
full_slices, list(zip(*pluck(2, per_block[key])))
),
),
axis,
)
dsk[keyname(vindex_merge_name, 0, okey)] = (
_vindex_merge,
[list(pluck(0, per_block[key])) for key in per_block],
[keyname(name, i, okey) for i in range(len(per_block))],
)
result_1d = Array(
HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),
out_name,
chunks,
x.dtype,
)
return result_1d.reshape(broadcast_shape + result_1d.shape[1:])
# output has a zero dimension, just create a new zero-shape array with the
# same dtype
from .wrap import empty
result_1d = empty(
tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name
)
return result_1d.reshape(broadcast_shape + result_1d.shape[1:])
def _get_axis(indexes):
""" Get axis along which point-wise slicing results lie
This is mostly a hack because I can't figure out NumPy's rule on this and
can't be bothered to go reading.
>>> _get_axis([[1, 2], None, [1, 2], None])
0
>>> _get_axis([None, [1, 2], [1, 2], None])
1
>>> _get_axis([None, None, [1, 2], [1, 2]])
2
"""
ndim = len(indexes)
indexes = [slice(None, None) if i is None else [0] for i in indexes]
x = np.empty((2,) * ndim)
x2 = x[tuple(indexes)]
return x2.shape.index(1)
def _vindex_slice(block, points):
""" Pull out point-wise slices from block """
points = [p if isinstance(p, slice) else list(p) for p in points]
return block[tuple(points)]
def _vindex_transpose(block, axis):
""" Rotate block so that points are on the first dimension """
axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))
return block.transpose(axes)
def _vindex_merge(locations, values):
"""
>>> locations = [0], [2, 1]
>>> values = [np.array([[1, 2, 3]]),
... np.array([[10, 20, 30], [40, 50, 60]])]
>>> _vindex_merge(locations, values)
array([[ 1, 2, 3],
[40, 50, 60],
[10, 20, 30]])
"""
locations = list(map(list, locations))
values = list(values)
n = sum(map(len, locations))
shape = list(values[0].shape)
shape[0] = n
shape = tuple(shape)
dtype = values[0].dtype
x = np.empty(shape, dtype=dtype)
ind = [slice(None, None) for i in range(x.ndim)]
for loc, val in zip(locations, values):
ind[0] = loc
x[tuple(ind)] = val
return x
def to_npy_stack(dirname, x, axis=0):
""" Write dask array to a stack of .npy files
This partitions the dask.array along one axis and stores each block along
that axis as a single .npy file in the specified directory
Examples
--------
>>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP
>>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP
The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``
respectively, as is specified by the chunk size along the zeroth axis::
$ tree data/
data/
|-- 0.npy
|-- 1.npy
|-- 2.npy
|-- info
The ``info`` file stores the dtype, chunks, and axis information of the array.
You can load these stacks with the ``da.from_npy_stack`` function.
>>> y = da.from_npy_stack('data/') # doctest: +SKIP
See Also
--------
from_npy_stack
"""
chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))
xx = x.rechunk(chunks)
if not os.path.exists(dirname):
os.mkdir(dirname)
meta = {"chunks": chunks, "dtype": x.dtype, "axis": axis}
with open(os.path.join(dirname, "info"), "wb") as f:
pickle.dump(meta, f)
name = "to-npy-stack-" + str(uuid.uuid1())
dsk = {
(name, i): (np.save, os.path.join(dirname, "%d.npy" % i), key)
for i, key in enumerate(core.flatten(xx.__dask_keys__()))
}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])
compute_as_if_collection(Array, graph, list(dsk))
def from_npy_stack(dirname, mmap_mode="r"):
""" Load dask array from stack of npy files
See ``da.to_npy_stack`` for docstring
Parameters
----------
dirname: string
Directory of .npy files
mmap_mode: (None or 'r')
Read data in memory map mode
"""
with open(os.path.join(dirname, "info"), "rb") as f:
info = pickle.load(f)
dtype = info["dtype"]
chunks = info["chunks"]
axis = info["axis"]
name = "from-npy-stack-%s" % dirname
keys = list(product([name], *[range(len(c)) for c in chunks]))
values = [
(np.load, os.path.join(dirname, "%d.npy" % i), mmap_mode)
for i in range(len(chunks[axis]))
]
dsk = dict(zip(keys, values))
return Array(dsk, name, chunks, dtype)
from .utils import meta_from_array
|
bsd-3-clause
|
silky/sms-tools
|
lectures/09-Sound-description/plots-code/mfcc.py
|
25
|
1103
|
import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
mfcc = ess.MFCC(numberCoefficients = 12)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
mfccs = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
mfcc_bands, mfcc_coeffs = mfcc(mX)
mfccs.append(mfcc_coeffs)
mfccs = np.array(mfccs)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
numFrames = int(mfccs[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, 1+np.arange(12), np.transpose(mfccs[:,1:]))
plt.ylabel('coefficients')
plt.title('MFCCs')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('mfcc.png')
plt.show()
|
agpl-3.0
|
appapantula/scikit-learn
|
sklearn/linear_model/tests/test_bayes.py
|
299
|
1770
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/matplotlib/table.py
|
10
|
20553
|
"""
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <[email protected]>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
from . import artist
from .artist import Artist, allow_rasterization
from .patches import Rectangle
from .cbook import is_string_like
from matplotlib import docstring
from .text import Text
from .transforms import Bbox
from matplotlib.path import Path
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor)
self.set_clip_on(False)
# Create text object
if loc is None:
loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
self.stale = True
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
        'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
self.stale = True
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
self.stale = False
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
        Currently supports 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l, b, w, h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
self.stale = True
class CustomCell(Cell):
"""
A subclass of Cell where the sides may be visibly toggled.
"""
_edges = 'BRTL'
_edge_aliases = {'open': '',
'closed': _edges, # default
'horizontal': 'BT',
'vertical': 'RL'
}
def __init__(self, *args, **kwargs):
visible_edges = kwargs.pop('visible_edges')
Cell.__init__(self, *args, **kwargs)
self.visible_edges = visible_edges
@property
def visible_edges(self):
return self._visible_edges
@visible_edges.setter
def visible_edges(self, value):
if value is None:
self._visible_edges = self._edges
elif value in self._edge_aliases:
self._visible_edges = self._edge_aliases[value]
else:
for edge in value:
if edge not in self._edges:
msg = ('Invalid edge param {0}, must only be one of'
' {1} or string of {2}.').format(
value,
", ".join(self._edge_aliases.keys()),
", ".join(self._edges),
)
raise ValueError(msg)
self._visible_edges = value
self.stale = True
def get_path(self):
        'Return a path where the edges specified by _visible_edges are drawn'
codes = [Path.MOVETO]
for edge in self._edges:
if edge in self._visible_edges:
codes.append(Path.LINETO)
else:
codes.append(Path.MOVETO)
if Path.MOVETO not in codes[1:]: # All sides are visible
codes[-1] = Path.CLOSEPOLY
return Path(
[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]],
codes,
readonly=True
)
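# Illustrative sketch (not part of the module): the visible_edges aliases above
# let callers request only the horizontal or vertical borders of a cell.
#
#     >>> c = CustomCell((0, 0), width=0.2, height=0.1,   # doctest: +SKIP
#     ...                visible_edges='horizontal')
#     >>> c.visible_edges                                  # doctest: +SKIP
#     'BT'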
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best': 0,
'upper right': 1, # default
'upper left': 2,
'lower left': 3,
'lower right': 4,
'center left': 5,
'center right': 6,
'lower center': 7,
'upper center': 8,
'center': 9,
'top right': 10,
'top left': 11,
'bottom left': 12,
'bottom right': 13,
'right': 14,
'left': 15,
'top': 16,
'bottom': 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None, **kwargs):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on '
'bottom; valid locations are\n%s\t' %
(loc, '\n\t'.join(six.iterkeys(self.codes))))
loc = 'bottom'
if is_string_like(loc):
loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._edges = None
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self.update(kwargs)
self.set_clip_on(False)
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0, 0)
cell = CustomCell(xy, visible_edges=self.edges, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
self.stale = True
@property
def edges(self):
return self._edges
@edges.setter
def edges(self, value):
self._edges = value
self.stale = True
def _approx_text_height(self):
return (self.FONTSIZE / 72.0 * self.figure.dpi /
self._axes.bbox.height * 1.2)
@allow_rasterization
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one
# will do
if renderer is None:
renderer = self.figure._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible():
return
renderer.open_group('table')
self._update_positions(renderer)
keys = list(six.iterkeys(self._cells))
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
# for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
self.stale = False
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
renderer = self.figure._cachedRenderer
if renderer is not None:
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x, mouseevent.y), {}
else:
return False, {}
def get_children(self):
'Return the Artists contained by the table'
return list(six.itervalues(self._cells))
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [cell.get_window_extent(renderer)
for cell in six.itervalues(self._cells)]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in six.iteritems(self._cells):
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = list(six.iterkeys(widths))
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = list(six.iterkeys(heights))
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in six.iteritems(self._cells):
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
self.stale = True
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
self.stale = True
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = list(six.itervalues(self._cells))[0].get_fontsize()
cells = []
for key, cell in six.iteritems(self._cells):
# ignore auto-sized columns
if key[1] in self._autoColumns:
continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in six.itervalues(self._cells):
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in six.itervalues(self._cells):
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in six.itervalues(self._cells):
cell.set_fontsize(size)
self.stale = True
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in six.itervalues(self._cells):
x, y = c.get_x(), c.get_y()
c.set_x(x + ox)
c.set_y(y + oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l, b, w, h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw / w, rh / h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = list(xrange(len(self.codes)))
# defaults for center
ox = (0.5 - w / 2) - l
oy = (0.5 - h / 2) - b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5 - w / 2) - l
if self._loc in (CL, CR, C): # center y
oy = (0.5 - h / 2) - b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None, edges='closed',
**kwargs):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None, edges='closed')
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
if cellColours is None and cellText is None:
raise ValueError('At least one argument from "cellColours" or '
'"cellText" must be provided to create a table.')
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
if len(row) != cols:
msg = "Each row in 'cellText' must have {0} columns"
raise ValueError(msg.format(cols))
if cellColours is not None:
if len(cellColours) != rows:
raise ValueError("'cellColours' must have {0} rows".format(rows))
for row in cellColours:
if len(row) != cols:
msg = "Each row in 'cellColours' must have {0} columns"
raise ValueError(msg.format(cols))
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0 / cols] * cols
# Fill in missing information for column
# and row labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
if len(rowLabels) != rows:
raise ValueError("'rowLabels' must be of length {0}".format(rows))
# If we have column labels, need to shift
# the text and colour arrays down 1 row
offset = 1
if colLabels is None:
if colColours is not None:
colLabels = [''] * cols
else:
offset = 0
elif colColours is None:
colColours = 'w' * cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox, **kwargs)
table.edges = edges
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row + offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row + offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
docstring.interpd.update(Table=artist.kwdoc(Table))
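# Hedged usage sketch for the table() factory defined above (assumes an active
# matplotlib Axes; the cell values and labels are hypothetical):
#
#     >>> import matplotlib.pyplot as plt                        # doctest: +SKIP
#     >>> fig, ax = plt.subplots()                                # doctest: +SKIP
#     >>> tab = table(ax, cellText=[['1', '2'], ['3', '4']],      # doctest: +SKIP
#     ...             colLabels=['a', 'b'], loc='bottom')
#     >>> tab.set_fontsize(8)                                     # doctest: +SKIP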
|
mit
|
cfe316/atomic
|
examples/Lz_ne_tau.py
|
1
|
4466
|
# This is meant to remake Pitcher 1997's Figure 11,
# Radiated power coefficient L_Z for carbon as a function of electron
# temperature T_e for different values of residence parameter n_e \tau_res
#
# This appears to be the same (or nearly the same) as
# Post 1995a (Journal of Nuclear Materials) "A review of recent
# developments in atomic processes for divertors and edge plasmas"
# Figure 17.
import numpy as np
import matplotlib.pyplot as plt
import atomic
class AnnotateRight(object):
def __init__(self, lines, texts, loc='last', ha=None, va='center'):
self.lines = lines
self.texts = texts
self.location = loc
self.ha = ha
self.va = va
self.axes = lines[0].axes
self._compute_coordinates()
self._avoid_collision()
self._annotate()
def _data_to_axis(self, line):
ax = line.axes
xy = line.get_xydata()
xy_fig = ax.transData.transform(xy)
xy_ax = ax.transAxes.inverted().transform(xy_fig)
return xy_ax
def _compute_coordinates(self):
self.coordinates = [self._get_last_xy(l) for l in self.lines]
def _avoid_collision(self):
rtol = 0.02
new_texts = []
new_coordinates = []
xy_last = None
for xy, text in zip(self.coordinates, self.texts):
if (xy_last is None) or (abs(xy_last[1] - xy[1]) > rtol):
new_texts.append(text)
new_coordinates.append(xy)
else:
new_texts[-1] = ','.join((new_texts[-1], text))
xy_last = xy
self.coordinates = new_coordinates
self.texts = new_texts
def _get_last_xy(self, line):
if self.location == 'last':
index = -1
if self.location == 'first':
index = 0
xy_last = self._data_to_axis(line)[index]
return xy_last
def _annotate(self):
deltax = 0.01
for xy, text in zip(self.coordinates, self.texts):
if xy[0] < 0.1:
ha = self.ha or 'right'
else:
ha = self.ha or 'left'
if ha == 'right':
xy = xy[0] - deltax, xy[1]
elif ha == 'left':
xy = xy[0] + deltax, xy[1]
va = self.va
self.axes.annotate(text, xy, xycoords='axes fraction',
va=va, ha=ha, size='small')
def annotate_lines(texts, **kwargs):
ax = kwargs.pop('ax', plt.gca())
AnnotateRight(ax.lines, texts, **kwargs)
def time_dependent_z(solution, times):
element = solution.atomic_data.element
title = element + r' time dependent $\left<Z\right>$'
ax = plt.gca()
for y in solution.select_times(times):
ax.loglog(solution.temperature, y.mean_charge(), color='black', ls='--')
ax.set_xlabel(r'$T_\mathrm{e}\ \mathrm{(eV)}$')
ax.set_ylim(0.4, y.atomic_data.nuclear_charge + 4)
annotate_lines(['$10^{%d}$' % i for i in np.log10(times * solution.density)])
z_mean = solution.y_collrad.mean_charge()
ax.loglog(solution.temperature, z_mean, color='black')
ax.set_title(title)
def Lz_radiated_power(rate_equations, taus):
ax = plt.gca()
for tau in taus:
y = rt.solve(times, temperature, density, tau)
rad = atomic.Radiation(y.abundances[-1])
ax.loglog(temperature, rad.specific_power['total'],
color='black', ls='--')
annotate_lines(['$10^{%d}$' % i for i in np.log10(taus * rt.density)])
power_collrad = atomic.Radiation(y.y_collrad).specific_power['total']
ax.loglog(rate_equations.temperature, power_collrad, color='black')
    AnnotateRight(ax.lines[-1:], [r'$\infty$'])
element = 'Carbon'
title = element + r' Lz radiated power'
ax.set_xlabel(r'$T_\mathrm{e}\ \mathrm{(eV)}$')
ax.set_ylabel(r'$L_z [\mathrm{W m^3}]$')
ax.set_title(title)
if __name__ == '__main__':
times = np.logspace(-7, 0, 100)
temperature = np.logspace(np.log10(0.8), np.log10(3e3), 100)
density = 1e19
rt = atomic.RateEquationsWithDiffusion(atomic.element('carbon'))
taus = np.logspace(13,18,6)/density
plt.figure(1); plt.clf()
plt.xlim(xmin=0.2, xmax=1e4)
plt.ylim(ymin=1e-35, ymax=1e-30)
Lz_radiated_power(rt, taus)
plt.text(2e3,3e-31,r'$n_e \tau \; [\mathrm{m}^{-3} \, \mathrm{s}]$')
plt.draw()
# plt.figure(2); plt.clf()
# time_dependent_power(y, taus)
# plt.draw()
plt.show()
|
mit
|
JeanKossaifi/scikit-learn
|
sklearn/utils/tests/test_sparsefuncs.py
|
157
|
13799
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
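# assign_rows_csr copies the selected rows of a CSR matrix into the selected
# rows of a preallocated dense array; the test below builds the expected result
# with a dense fancy-indexing assignment and compares against it.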
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
|
bsd-3-clause
|
cernops/CloudMan
|
cloudman/cloudman/groupAllocationQueries.py
|
1
|
74719
|
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.template import RequestContext, loader, Context
from django.shortcuts import render_to_response
from django.conf import settings
from models import GroupAllocation
from django.db import transaction
from models import GroupAllocationMetadata
from models import GroupAllocationAllowedResourceType
from forms import GroupAllocationForm
from forms import ResourceForm
from models import Groups
from templatetags.filters import displayNone
from models import TopLevelAllocation
from models import ProjectAllocation
from models import ResourceType
from getCount import getGroupsCount
from getCount import getProjectAllocationsCount
from projectAllocationQueries import isAdminOfAnyProjectAllocation
from projectAllocationQueries import isAdminOfProjectAllocation
from getPrivileges import isSuperUser
from django.db.models import Q
from validator import *
import getConfig
import django
from logQueries import printStackTrace
from matplotlib import font_manager as fm
import simplejson
from commonFunctions import *
from groupQueries import isAdminForGroup
from groupQueries import isAdminOfAnyGroup
from projectAllocationQueries import getstats as prallocgetstats
from settings import GROUP_ALLOC_DEPTH
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import copy
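# delUpdateAllowed: returns True when the requesting user's e-groups grant
# delete/update rights on the given group allocation, i.e. the user is a
# superuser or administers the project allocation / parent group allocation
# from which this allocation was carved out.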
def delUpdateAllowed(groupsList,grpAllocObj):
try:
prjAllocName = grpAllocObj.project_allocation.name
except Exception:
prjAllocName = ''
try:
grpAllocName = grpAllocObj.parent_group_allocation.name
except Exception:
grpAllocName = ''
if isSuperUser(groupsList):
return True
else:
if isAdminOfProjectAllocation(groupsList,prjAllocName) or isAdminOfGroupAllocation(groupsList,grpAllocName):
return True
else:
return False
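# isAdminOfGroupAllocation: checks admin rights on a group allocation by name,
# accepting superusers, members of the owning group's admin e-group, and
# (recursively) admins of the parent project or parent group allocation.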
def isAdminOfGroupAllocation(adminGroups, grpAllocName):
if isSuperUser(adminGroups):
return True
if len(adminGroups) < 1:
return False
try:
grAllocObject = GroupAllocation.objects.get(name=grpAllocName)
grAllocGroup = grAllocObject.group.admin_group
if grAllocGroup in adminGroups:
return True
prjAllocName =''
if grAllocObject.project_allocation:
prjAllocName = grAllocObject.project_allocation.name
grpAllocName =''
if grAllocObject.parent_group_allocation:
grpAllocName = grAllocObject.parent_group_allocation.name
else:
return isAdminOfProjectAllocation(adminGroups,prjAllocName) or isAdminOfGroupAllocation(adminGroups,grpAllocName)
except Exception:
return False
'''def isAdminOfGroupAllocation(adminGroups, grpAllocName):
userIsAdmin = False
if len(adminGroups) < 1:
return userIsAdmin
try:
grAllocObject = GroupAllocation.objects.get(name=grpAllocName)
grAllocGroup = grAllocObject.group.admin_group
for oneGroup in adminGroups:
if oneGroup == grAllocGroup:
userIsAdmin = True
break
except Exception:
userIsAdmin = False
return userIsAdmin
'''
def isAdminOfAnyGroupAllocation(adminGroups):
userIsAdmin = False
if len(adminGroups) < 1:
return userIsAdmin
qset = Q(group__admin_group__exact=adminGroups[0])
if len(adminGroups) > 1:
for group in adminGroups[1:]:
qset = qset | Q(group__admin_group__exact=group)
if (GroupAllocation.objects.filter(qset)).exists():
userIsAdmin = True
return userIsAdmin
def checkNameIgnoreCase(allocName):
allocNameExists = False
if GroupAllocation.objects.filter(name__iexact=allocName).exists():
allocNameExists = True
return allocNameExists
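# addnew: serves both the GET request that renders the "new group allocation"
# form and the POST request that validates the submitted values, checks the
# requester's privileges and the free capacity of the parent allocation, and
# creates the GroupAllocation together with its metadata and allowed resource
# types inside a single transaction.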
@transaction.commit_on_success
def addnew(request):
groups = request.META.get('ADFS_GROUP','')
groupsList = groups.split(';')
egroupList = groups.split(';') ;
## Group allocation is possible only if the following already exist:
## at least one group
## at least one project allocation
groupsCount = getGroupsCount()
if groupsCount <= 0:
message = "No Groups Defined. First create Groups and then try to define Group Allocation";
html = "<html><body> %s.</body></html>" % message
return HttpResponse(html)
projectAllocationsCount = getProjectAllocationsCount()
if projectAllocationsCount <= 0:
message = "No Project Allocations Defined. First create Project Level Allocations and then try to define Group Allocation";
html = "<html><body> %s.</body></html>" % message
return HttpResponse(html)
## If the request is through form submission, then try to create the allocation
## or else return a form for creating new group allocation
if request.method == 'POST':
attr_name_array = request.POST.getlist('attribute_name');
attr_value_array = request.POST.getlist('attribute_value');
#Create dictionary of attr_name and attr_value with attr_name:attr_value as key:value pairs
attr_list = createDictFromList(attr_name_array,attr_value_array)
redirectURL = '/cloudman/message/?msg='
## check that the specified group allocation name is unique
allocName = request.REQUEST.get("name", "")
## Get the remaining fields input values
groupName = request.REQUEST.get("group", "")
projectAllocationName = request.REQUEST.get("project_allocation", "")
parentGroupAllocationName = request.REQUEST.get("parent_group_allocation", "")
hepspec = request.REQUEST.get("hepspecs", "")
memory = request.REQUEST.get("memory", "")
storage = request.REQUEST.get("storage", "")
bandwidth = request.REQUEST.get("bandwidth", "")
selAllocResourceTypes = request.REQUEST.getlist("selresourcetype")
comment = request.REQUEST.get("comment", "")
try:
validate_name(allocName)
validate_name(groupName)
validate_name(projectAllocationName)
validate_name(parentGroupAllocationName)
validate_float(hepspec)
validate_float(memory)
validate_float(storage)
validate_float(bandwidth)
validate_comment(comment)
validate_attr(attr_list)
except ValidationError as e:
msg = 'Add Group Allocation Form '+', '.join(e.messages)
html = "<html><head><meta HTTP-EQUIV=\"REFRESH\" content=\"5; url=/cloudman/groupallocation/list/\"></head><body> %s.</body></html>" % msg
return HttpResponse(html)
allocNameExists = checkNameIgnoreCase(allocName)
if allocNameExists:
msgAlreadyExists = 'Allocation Name ' + allocName + ' already exists. Hence New Group Allocation Creation Stopped'
return HttpResponseRedirect(redirectURL + msgAlreadyExists)
## allocation is allowed only if user has
## either cloudman resource manager privileges
## or
## membership of admin group of selected group and
## membership of admin group of project allocation or parent group allocation (whichever is selected)
userIsSuperUser = isSuperUser(groupsList)
if not userIsSuperUser:
# if not (isAdminForGroup(groupName, groupsList)):
# message = "You neither have cloudman resource manager privileges nor membership of the Administrative E-Group of the Group " + groupName + ". Hence you are not authorized to create Group Allocation";
# html = "<html><body> %s.</body></html>" % message
# return HttpResponse(html)
if parentGroupAllocationName == '':
if not (isAdminOfProjectAllocation(groupsList, projectAllocationName)):
message = "You neither have cloudman resource manager privileges nor membership of the Administrative E-Group of the Project for which the Project Allocation " + projectAllocationName + " is created . Hence you are not authorized to create Group Allocation";
html = "<html><body> %s.</body></html>" % message
return HttpResponse(html)
else:
if not (isAdminOfGroupAllocation(groupsList, parentGroupAllocationName)):
message = "You neither have cloudman resource manager privileges nor membership of the Administrative E-Groups of the Group for which the Parent Group Allocation " + parentGroupAllocationName + " is created. Hence you are not authorized to create Group Allocation";
html = "<html><body> %s.</body></html>" % message
return HttpResponse(html)
## validate the resource parameters values
errorMessage = checkAttributeValues(hepspec, memory, storage, bandwidth)
if (errorMessage != ''):
return HttpResponseRedirect(redirectURL + errorMessage)
if hepspec == '':
hepspec = None
else:
hepspec = round((float(hepspec)), 3)
if memory == '':
memory = None
else:
memory = round((float(memory)), 3)
if storage == '':
storage = None
else:
storage = round((float(storage)), 3)
if bandwidth == '':
bandwidth = None
else:
bandwidth = round((float(bandwidth)), 3)
## Get the Group Object
groupObject = None
try:
groupObject = Groups.objects.get(name=groupName)
except Groups.DoesNotExist:
errorMessage = 'Group Name ' + groupName + ' does not exist'
return HttpResponseRedirect(redirectURL + errorMessage)
projectAllocationObject = None
parentGroupAllocationObject = None
## Get the project allocation and parent group allocation (whichever is selected)
if parentGroupAllocationName == '':
try:
projectAllocationObject = ProjectAllocation.objects.get(name=projectAllocationName)
except ProjectAllocation.DoesNotExist:
errorMessage = 'Project Allocation Name ' + projectAllocationName + ' does not exist'
return HttpResponseRedirect(redirectURL + errorMessage)
else:
try:
parentGroupAllocationObject = GroupAllocation.objects.get(name=parentGroupAllocationName)
except GroupAllocation.DoesNotExist:
errorMessage = 'Parent Group Allocation Name ' + parentGroupAllocationName + ' does not exist'
return HttpResponseRedirect(redirectURL + errorMessage)
level = int(groupAllocLevel(projectAllocationObject,parentGroupAllocationObject))
if level <= 0:
errorMessage = 'You are exceeding the Defined depth for the Allowed Group Allocation Level under The Project'
transaction.rollback()
return HttpResponseRedirect(redirectURL + errorMessage)
## initialize three dict, one each for total, free and used fraction resource parameter values
## get these values from the selected project allocation or parent group allocation
totResources = {'hepspec': None, 'memory': None, 'storage': None, 'bandwidth': None}
freeResources = {'hepspec': None, 'memory': None, 'storage': None, 'bandwidth': None}
usedFraction = {'hepspec': 0, 'memory': 0, 'storage': 0, 'bandwidth': 0 }
if parentGroupAllocationName == '':
## get the resource information of the selected project allocation
errorMessage = prallocgetstats(projectAllocationName, totResources, freeResources, usedFraction)
if errorMessage != '':
return HttpResponseRedirect(redirectURL + errorMessage)
else:
## get the resource information of the selected parent group allocation
errorMessage = getstats(parentGroupAllocationName, totResources, freeResources, usedFraction)
if errorMessage != '':
return HttpResponseRedirect(redirectURL + errorMessage)
## check whether the selected resource parameter values are available in the selected
## project allocation or parent group allocation
if (hepspec > freeResources['hepspec']):
message = "The Requested Hepspec value is greater than the available Hepspec"
return HttpResponseRedirect(redirectURL + message)
if (memory > freeResources['memory']):
message = "The Requested Memory value is greater than the available Memory"
return HttpResponseRedirect(redirectURL + message)
if (storage > freeResources['storage']):
message = "The Requested Storage value is greater than the available Storage"
return HttpResponseRedirect(redirectURL + message)
if (bandwidth > freeResources['bandwidth']):
message = "The Requested Bandwidth value is greater than the available Bandwidth"
return HttpResponseRedirect(redirectURL + message)
#Make Sure no attribute_name or attribute_value is empty
##Check if all the attribute name are distinct for this first convert all the attribute name to uppercase and
## After converting to uppercase check for duplicate in the array
if checkForEmptyStrInList(attr_name_array):
errorMessage = 'Attribute Name Cannot be Empty. Hence Add Group Allocation Operation Stopped'
return HttpResponseRedirect(redirectURL + errorMessage)
if checkForEmptyStrInList(attr_value_array):
errorMessage = 'Attribute Value Cannot be Empty. Hence Add Group Allocation Operation Stopped'
return HttpResponseRedirect(redirectURL + errorMessage)
##Check if all the attribute name are distinct for this first convert all the attribute name to uppercase and
## After converting to uppercase check for duplicate in the array
new_attr_name_array = [x.upper() for x in attr_name_array];
if len(new_attr_name_array) != len( set(new_attr_name_array) ):
errorMessage = 'Duplicate values for the Attribute Name. Hence Add Group Allocation Operation Stopped'
return HttpResponseRedirect(redirectURL + errorMessage)
## create the group allocation with all the input values
finalMessage = ''
try:
gralloc = GroupAllocation(name = allocName, group = groupObject, project_allocation = projectAllocationObject, parent_group_allocation = parentGroupAllocationObject, hepspec = hepspec, memory = memory, storage = storage, bandwidth = bandwidth)
gralloc.save()
alloc=GroupAllocation.objects.get(name=allocName)
for attr_name,attr_value in attr_list.items():
gralloc_metadata = GroupAllocationMetadata(attribute = attr_name,value = attr_value,group_allocation = alloc)
gralloc_metadata.save()
except Exception, err:
finalMessage = "Error in Creating Group Allocation , reason : %s" % str(err)
transaction.rollback()
html = "<html><body> %s.</body></html>" % finalMessage
return HttpResponse(html)
if parentGroupAllocationName == '':
finalMessage = "Group Allocation Created Successfully with Name %s using Project Allocation %s for Group %s with %s Hepspec, %s Memory, %s Storage, %s Bandwidth " % (allocName, projectAllocationName, groupName, (str(hepspec)), (str(memory)), (str(storage)), (str(bandwidth)))
else:
finalMessage = "Group Allocation Created Successfully with Name %s using Parent Group Allocation %s for Group %s with %s Hepspec, %s Memory, %s Storage, %s Bandwidth " % (allocName, parentGroupAllocationName, groupName, (str(hepspec)), (str(memory)), (str(storage)), (str(bandwidth)))
finalMessage += "<br/><br/>";
## create the group allocation allowed resource types
gralloc = None
try:
gralloc = GroupAllocation.objects.get(name = allocName)
finalMessage += " Assigning Allowed Resource Types to Allocation : <br/>"
for i in range(len(selAllocResourceTypes)):
selResourceType = selAllocResourceTypes[i]
finalMessage += " Resource Type Name: " + selResourceType + "<br/>"
resourceTypeRecord = ResourceType.objects.get(name = selResourceType)
allowedResourceType = GroupAllocationAllowedResourceType(group_allocation = gralloc, resource_type = resourceTypeRecord)
allowedResourceType.save()
except Exception, err:
finalMessage += "Exception arised while Assigning Allowed Resources Types for the Group Allocation, reason : %s " %str(err)
finalMessage += "<br/> Hence Group Allocation Creation Stopped Here (and also record cleared completely)."
gralloc.delete()
transaction.rollback()
html = "<html><body> %s.</body></html>" % finalMessage
return HttpResponse(html)
##Add the LOg
oldgroupAllocObj = GroupAllocation.objects.get(name = allocName)
if not addLog(request,allocName,comment,oldgroupAllocObj,None,'groupallocation','add',True):
transaction.rollback()
## finally, return a successful message to the user
finalMessage += "<br/> Group Allocation Creation Successfully Completed";
html = "<html><head><meta HTTP-EQUIV=\"REFRESH\" content=\"5; url=/cloudman/groupallocation/list/\"></head><body> %s.</body></html>" % finalMessage
return HttpResponse(html)
## form post request if condition ends here - start of else block
else:
## form is displayed if user has
## either cloudman resource manager privileges
## or
## membership of admin group of any group and
## membership of admin group of any project allocation or any parent group allocation
userIsSuperUser = isSuperUser(groupsList)
if not userIsSuperUser:
# if not (isAdminOfAnyGroup(groupsList)):
# message = "You neither have cloudman resource manager privileges nor membership of the Administrative E-Group of any Group. Hence you are not authorized to create Group Allocation";
# html = "<html><body> %s.</body></html>" % message
# return HttpResponse(html)
userIsProjectAdmin = isAdminOfAnyProjectAllocation(groupsList)
userIsGroupAdmin = isAdminOfAnyGroupAllocation(groupsList)
if not (userIsProjectAdmin or userIsGroupAdmin):
message = "You neither have cloudman resource manager privileges nor membership of any Group for which a Project Allocation is defined or membership of any Group or which a Group Allocation is defined. Hence you are not authorized to create Group Allocation";
html = "<html><body> %s.</body></html>" % message
return HttpResponse(html)
##Get all the details for preparing a form to be displayed
prAllocNames = []
grAllocNames = []
if userIsSuperUser:
prAllocNames = ProjectAllocation.objects.values_list('name', flat=True)
grAllocObjList = GroupAllocation.objects.all()
grAllocNames =[]
for grAllocObj in grAllocObjList:
level = int(groupAllocLevel(grAllocObj.project_allocation,grAllocObj.parent_group_allocation)) - 1
if level >0:
grAllocNames.append(grAllocObj.name)
else:
projectQset = Q(project__admin_group__exact=groupsList[0])
groupQset = Q(group__admin_group__exact=groupsList[0])
if len(groupsList) > 1:
for group in groupsList[1:]:
projectQset = projectQset | Q(project__admin_group__exact=group)
groupQset = groupQset | Q(group__admin_group__exact=group)
#prAllocNames = ProjectAllocation.objects.filter(projectQset).values_list('name', flat=True)
prAllocNames = ProjectAllocation.objects.filter(groupQset|projectQset).values_list('name', flat=True)
#grAllocNames = GroupAllocation.objects.filter(groupQset).values_list('name', flat=True)
grAllocNamesList = GroupAllocation.objects.values_list('name', flat=True)
grAllocNames = []
for allocName in grAllocNamesList:
if isAdminOfGroupAllocation(groupsList, allocName):
grAllocNames.append(allocName)
grNames = Groups.objects.values_list('name', flat=True)
## return to the template for rendering the form
form = ResourceForm
return render_to_response('groupallocation/addnew.html',locals(),context_instance=RequestContext(request))
def listall(request):
groupsAllocationList = GroupAllocation.objects.select_related('group','parent_group_allocation','project_allocation').all().order_by('name')
deleteDict = {}
groups = request.META.get('ADFS_GROUP','')
groupsList = groups.split(';') ;
showMultiDeleteOption = False
numManaged=0
for grpAllocObj in groupsAllocationList:
deleteItem = delUpdateAllowed(groupsList,grpAllocObj)
if deleteItem:
showMultiDeleteOption = True
numManaged +=1
deleteDict[grpAllocObj.name] = deleteItem
return render_to_response('groupallocation/listall.html',locals(),context_instance=RequestContext(request))
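# getresourceinfo: AJAX endpoint that returns, as JSON, the total/free/used
# resource figures of a group allocation together with its allowed resource types.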
def getresourceinfo(request):
redirectURL = '/cloudman/message/?msg='
## if the request is through ajax, then dump the data in JSON format
#if request.is_ajax():
format = 'json'
mimetype = 'application/javascript'
groupAllocationName = request.REQUEST.get("name", "")
## initialize three dict, one each for total, free and used fraction resource parameter values
totResources = {'hepspec': None, 'memory': None, 'storage': None, 'bandwidth': None}
freeResources = {'hepspec': None, 'memory': None, 'storage': None, 'bandwidth': None}
usedFraction = {'hepspec': 0, 'memory': 0, 'storage': 0, 'bandwidth': 0 }
## call this function to calculate the above defined values
errorMessage = getstats(groupAllocationName, totResources, freeResources, usedFraction)
if errorMessage != '':
nulldata = []
data = simplejson.dumps(nulldata)
return HttpResponse(data,mimetype)
## frame an object with all the resource parameter info for this group allocation
## the information include, what is total available, how much is free and percentage of already allocated
groupAllocationInfo = [{"pk": groupAllocationName, "model": "cloudman.groupallocationinfo", "fields": {"tothepspecs": totResources['hepspec'], "totmemory": totResources['memory'], "totstorage": totResources['storage'], "totbandwidth": totResources['bandwidth']}}, {"model": "cloudman.groupallocationfreeinfo", "fields": {"hepspecsfree": freeResources['hepspec'], "memoryfree": freeResources['memory'], "storagefree": freeResources['storage'], "bandwidthfree": freeResources['bandwidth']}}, {"model": "cloudman.groupallocationusedinfoper", "fields":{"hepspecsfraction": usedFraction['hepspec'], "memoryfraction": usedFraction['memory'], "storagefraction": usedFraction['storage'], "bandwidthfraction": usedFraction['bandwidth']}}]
## Get the allowed resource types for this group allocation
groupAllocationResourceTypeObjects = GroupAllocationAllowedResourceType.objects.filter(group_allocation__name=groupAllocationName)
groupAllocationResourceTypeList = list(groupAllocationResourceTypeObjects)
resourceTypeIds = []
resourceTypeObjects = None
for oneRow in groupAllocationResourceTypeObjects:
resourceTypeIds.append(oneRow.resource_type.id)
if len(resourceTypeIds) > 0:
resourceTypeObjects = ResourceType.objects.filter(id__in=resourceTypeIds)
for oneRT in resourceTypeObjects:
groupAllocationInfo.append({"pk": oneRT.id, "model": "cloudman.resourcetype", "fields": {"name": oneRT.name, "resource_class": oneRT.resource_class, "hepspecs": oneRT.hepspecs, "memory": oneRT.memory, "storage": oneRT.storage, "bandwidth": oneRT.bandwidth}})
## finally dump the data into json and return
data = simplejson.dumps(groupAllocationInfo)
return HttpResponse(data,mimetype)
# If you want to prevent non AJAX calls
#else:
# return HttpResponse(status=400)
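# getstats: fills the three caller-supplied dicts (totResources, freeResources,
# usedFraction) for the named group allocation and returns '' on success or an
# error message string otherwise. A minimal usage sketch (names hypothetical):
#
#     tot, free, used = {}, {}, {}
#     err = getstats('some-alloc', tot, free, used)
#     if err: handle_error(err)   # handle_error is a placeholder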
def getstats(grAllocName, totResources, freeResources, usedFraction):
errorMessage = ''
## Get the Group Allocation Object
groupAllocationObject = None
try:
groupAllocationObject = GroupAllocation.objects.get(name=grAllocName)
except GroupAllocation.DoesNotExist:
errorMessage = 'Group Allocation Name ' + grAllocName + ' does not exist'
return errorMessage
## Assign the resource parameter values to separate variables
totHepSpecs = groupAllocationObject.hepspec
totMemory = groupAllocationObject.memory
totStorage = groupAllocationObject.storage
totBandwidth = groupAllocationObject.bandwidth
totResources['hepspec'] = totHepSpecs
totResources['memory'] = totMemory
totResources['storage'] = totStorage
totResources['bandwidth'] = totBandwidth
## Get all the group allocations whose parent is this group allocation
groupAllocationObjects = GroupAllocation.objects.filter(parent_group_allocation__name = grAllocName)
## Find how much of this group allocation is already allocated. Start with none is allocated
hepSpecsFree = totHepSpecs
memoryFree = totMemory
storageFree = totStorage
bandwidthFree = totBandwidth
for oneGroup in groupAllocationObjects:
hepspec = oneGroup.hepspec
memory = oneGroup.memory
storage = oneGroup.storage
bandwidth = oneGroup.bandwidth
if (hepspec != None):
hepSpecsFree = hepSpecsFree - hepspec
if (memory != None):
memoryFree = memoryFree - memory
if (storage != None):
storageFree = storageFree - storage
if (bandwidth != None):
bandwidthFree = bandwidthFree - bandwidth
freeResources['hepspec'] = hepSpecsFree
freeResources['memory'] = memoryFree
freeResources['storage'] = storageFree
freeResources['bandwidth'] = bandwidthFree
## Calculate percentage of each resource parameter already allocated, initialize them to 0 first
hepSpecsFraction = 0
memoryFraction = 0
storageFraction = 0
bandwidthFraction = 0
if totHepSpecs != None:
if totHepSpecs > 0:
hepSpecsFraction = round((((totHepSpecs - hepSpecsFree)/totHepSpecs) * 100), 3)
if totMemory != None:
if totMemory > 0:
memoryFraction = round((((totMemory - memoryFree)/totMemory) * 100), 3)
if totStorage != None:
if totStorage > 0:
storageFraction = round((((totStorage - storageFree)/totStorage) * 100), 3)
if totBandwidth != None:
if totBandwidth > 0:
bandwidthFraction = round((((totBandwidth - bandwidthFree)/totBandwidth) * 100), 3)
usedFraction['hepspec'] = hepSpecsFraction
usedFraction['memory'] = memoryFraction
usedFraction['storage'] = storageFraction
usedFraction['bandwidth'] = bandwidthFraction
return errorMessage
@transaction.commit_on_success
def delete(request):
grAllocName = request.REQUEST.get("name", "")
comment = request.REQUEST.get("comment", "deleting")
redirectURL = '/cloudman/message/?msg='
groups = request.META.get('ADFS_GROUP','')
groupsList = groups.split(';') ;
## Get the Group Allocation Object
grAllocObject = None
try:
grAllocObject = GroupAllocation.objects.get(name=grAllocName)
except GroupAllocation.DoesNotExist:
failureMessage = "Group Allocation with Name " + grAllocName + " could not be found"
return HttpResponseRedirect(redirectURL+failureMessage)
## update is allowed only if the user has either
## cloudman resource manager privileges
## or has membership of the admin group of the group for which this allocation is done
if not delUpdateAllowed(groupsList,grAllocObject):
message = "Neither cloudman resource manager privileges nor membership of the Administrative E-Group of the Group " + grAllocObject.group.name + " for the Project allocation or parent group allocation for this Group Allocation. Hence not authorized to delete Group Allocation";
html = "<html><body> %s.</body></html>" % message
return HttpResponse(html)
# userIsSuperUser = isSuperUser(groupsList)
# if not userIsSuperUser:
# if not (isAdminForGroup(grAllocObject.group.name, groupsList)):
# message = "Neither cloudman resource manager privileges nor membership of the Administrative E-Group of the Group " + grAllocObject.group.name + " for which this Group Allocation is assigned. Hence not authorized to delete Group Allocation";
# html = "<html><body> %s.</body></html>" % message
# return HttpResponse(html)
## check if any group allocations have been defined using this allocation as its parent
grAllocNames = GroupAllocation.objects.filter(parent_group_allocation__name__iexact = grAllocName).values_list('name', flat=True).order_by('name')
## if yes, then alert the user and stop the delete operation
finalMessage = ''
grAllocNamesList = list(grAllocNames)
if len(grAllocNamesList) > 0:
finalMessage = finalMessage + "Group Allocation Names: " + (', '.join(grAllocNamesList)) + "<br/>"
if not finalMessage == '':
finalMessage = "Group Allocation with Name " + grAllocName + " Could not be deleted because it is being used as Parent Group in " + "<br/>" + finalMessage
html = "<html><body> %s</body></html>" % finalMessage
transaction.rollback()
return HttpResponse(html)
#Add the Log
oldGroupAllocObj = grAllocObject
addLog(request,grAllocName,comment,oldGroupAllocObj,None,'groupallocation','delete',False)
## if no allocations, then first delete the allowed resource types and then the allocation itself
GroupAllocationAllowedResourceType.objects.filter(group_allocation__name__iexact = grAllocName).delete()
grAllocObject.delete()
## return a success message to the user
message = "Group Allocation with Name " + grAllocName + " deleted successfully "
html = "<html><HEAD><meta HTTP-EQUIV=\"REFRESH\" content=\"4; url=/cloudman/groupallocation/list/\"></HEAD><body> %s.</body></html>" % message
return HttpResponse(html)
@transaction.commit_on_success
def deleteMultiple(request):
grAllocNameList = request.REQUEST.get("name_list", "")
comment = request.REQUEST.get("comment", "deleting")
printArray = []
groups = request.META.get('ADFS_GROUP','')
groupsList = groups.split(';') ;
title = "Delete multiple Group Allocation message"
grAllocNameArray = grAllocNameList.split("%%")
for grAllocName in grAllocNameArray:
## Get the Group Allocation Object
grAllocObject = None
try:
grAllocObject = GroupAllocation.objects.get(name=grAllocName)
except GroupAllocation.DoesNotExist:
printArray.append( "Group Allocation with Name " + grAllocName + " could not be found")
continue
## delete is allowed only if the user has either
## cloudman resource manager privileges
## or has membership of the admin group of the group for which this allocation is done
if not delUpdateAllowed(groupsList,grAllocObject):
message = "Neither cloudman resource manager privileges nor membership of the Administrative E-Group of the Group " + grAllocObject.group.name + "for the ProjectAllocation or ParentGroupAllocation for this Group Allocation. Hence not authorized to delete Group Allocation";
printArray.append(message)
continue
# userIsSuperUser = isSuperUser(groupsList)
# if not userIsSuperUser:
# if not (isAdminForGroup(grAllocObject.group.name, groupsList)):
# message = "Neither cloudman resource manager privileges nor membership of the Administrative E-Group of the Group " + grAllocObject.group.name + " for which this Group Allocation is assigned. Hence not authorized to delete Group Allocation";
# printArray.append(message)
# continue
## check if any group allocations have been defined using this allocation as its parent
grAllocNames = GroupAllocation.objects.filter(parent_group_allocation__name__iexact = grAllocName).values_list('name', flat=True).order_by('name')
## if yes, then alert the user and stop the delete operation
finalMessage = ''
grAllocNamesList = list(grAllocNames)
if len(grAllocNamesList) > 0:
finalMessage = finalMessage + "Group Allocation Names: " + (', '.join(grAllocNamesList)) + " "
if not finalMessage == '':
finalMessage = "Group Allocation with Name " + grAllocName + " Could not be deleted because it is being used as Parent Group in " + " " + finalMessage
printArray.append(finalMessage)
else:
#write the Log
addLog(request,grAllocName,comment,grAllocObject,None,'groupallocation','delete',False)
## if no allocations, then first delete the allowed resource types and then the allocation itself
GroupAllocationAllowedResourceType.objects.filter(group_allocation__name__iexact = grAllocName).delete()
grAllocObject.delete()
printArray.append("Group Allocation with Name " + grAllocName + " deleted successfully ")
return render_to_response('base/deleteMultipleMsg.html',locals(),context_instance=RequestContext(request))
def getdetails(request):
redirectURL = '/cloudman/message/?msg='
allocName = request.REQUEST.get("name", "")
## Get the Group Allocation Object
allocInfo = None
try:
allocInfo = GroupAllocation.objects.select_related('project_allocation','group','parent_group_allocation').get(name=allocName)
except GroupAllocation.DoesNotExist:
errorMessage = 'Group Allocation with Name ' + allocName + ' does not exist'
return HttpResponseRedirect(redirectURL + errorMessage)
##Get all the group allocation metadata for this
grAllocMetadata = GroupAllocationMetadata.objects.filter(group_allocation__name__iexact = allocName).values('attribute','value').order_by('attribute')
## Get the allowed resource types of this allocation
allowedResourceTypesList = GroupAllocationAllowedResourceType.objects.select_related('resource_type').filter(group_allocation = allocInfo).order_by('resource_type__name')
## Get all the group allocations done using this allocation as its parent
groupAllocationsInfo = GroupAllocation.objects.select_related('group').filter(parent_group_allocation__name=allocName).order_by('name')
object_id = allocInfo.id
changeLogList = getLog('groupallocation',allocName,object_id,None)
return render_to_response('groupallocation/getdetails.html',locals(),context_instance=RequestContext(request))
@transaction.commit_on_success
def update(request):
grAllocName = request.REQUEST.get("name", "")
redirectURL = '/cloudman/message/?msg='
groups = request.META.get('ADFS_GROUP','')
groupsList = groups.split(';') ;
## Get the Group Allocation Object
grAllocObject = None
try:
grAllocObject = GroupAllocation.objects.get(name=grAllocName)
except GroupAllocation.DoesNotExist:
failureMessage = "Group Allocation with Name " + grAllocName + " could not be found"
return HttpResponseRedirect(redirectURL+failureMessage)
##Get all the group allocation metadata for this
oldgrpAllocInfo = getGroupAllocationInfo(grAllocObject)
Metadata = GroupAllocationMetadata.objects.filter(group_allocation__name__iexact = grAllocName).values('attribute','value').order_by('attribute')
old_attr_list = {}
for oneRow in Metadata:
attribute = oneRow['attribute']
value = oneRow['value']
old_attr_list[attribute] = value
## update is allowed only if the user has either
## cloudman resource manager privileges
## or has membership of the admin group of the group for which this allocation is done
if not delUpdateAllowed(groupsList,grAllocObject):
message = "You neither have cloudman resource manager privileges nor membership of the Administrative E-Group of the Group " + grAllocObject.group.name + "for the Project Allocation or ParentGroupAllocation for this Group Allocation. Hence you are not authorized to update Group Allocation";
html = "<html><body> %s.</body></html>" % message
return HttpResponse(html)
# userIsSuperUser = isSuperUser(groupsList)
# if not userIsSuperUser:
# if not (isAdminForGroup(grAllocObject.group.name, groupsList)):
# message = "You neither have cloudman resource manager privileges nor membership of the Administrative E-Group of the Group " + grAllocObject.group.name + " for which this Group Allocation is assigned. Hence you are not authorized to update Group Allocation";
# html = "<html><body> %s.</body></html>" % message
# return HttpResponse(html)
## Get the allowed resource types of this group allocation
grAllocRTList = GroupAllocationAllowedResourceType.objects.filter(group_allocation__name=grAllocName).values_list('resource_type__name', flat=True)
## if the request is through POST form submission, then try to update by assinging the changed values
## else prepare an update form and return
if request.method == 'POST':
## Existing values
currName = grAllocObject.name
currHepSpec = grAllocObject.hepspec
currMemory = grAllocObject.memory
currStorage = grAllocObject.storage
currBandwidth = grAllocObject.bandwidth
oldgrAllocObject = copy.copy(grAllocObject)
## New Values
newName = request.REQUEST.get("newname", "")
newHepSpec = request.REQUEST.get("hepspec", "")
newMemory = request.REQUEST.get("memory", "")
newStorage = request.REQUEST.get("storage", "")
newBandwidth = request.REQUEST.get("bandwidth", "")
newRTList = request.REQUEST.getlist("grallocallowedrt")
comment = request.REQUEST.get("comment","")
scale = request.REQUEST.get("scale")
storagescale = request.REQUEST.get("storagescale")
## New values for Project metadata
new_attr_name_list = request.POST.getlist('attribute_name');
new_attr_value_list = request.POST.getlist('attribute_value');
#Create dictionary of attr_name and attr_value with attr_name:attr_value as key:value pairs
attr_list = createDictFromList(new_attr_name_list,new_attr_value_list)
try:
validate_name(newName)
validate_float(newHepSpec)
validate_float(newMemory)
validate_float(newStorage)
validate_float(newBandwidth)
validate_comment(comment)
validate_attr(attr_list)
except ValidationError as e:
msg = 'Edit Group Allocation Form '+', '.join(e.messages)
html = "<html><head><meta HTTP-EQUIV=\"REFRESH\" content=\"5; url=/cloudman/groupallocation/list/\"></head><body> %s.</body></html>" % msg
return HttpResponse(html)
## validate the new resource parameter values
errorMsg = checkAttributeValues(newHepSpec, newMemory, newStorage, newBandwidth)
if (errorMsg != ''):
return HttpResponseRedirect(redirectURL + errorMsg)
## check whether any existing resource type is de-selected or any new one selected
rtNotChanged = True;
for newRt in newRTList:
if not newRt in grAllocRTList:
rtNotChanged = False
for oldRt in grAllocRTList:
if not oldRt in newRTList:
rtNotChanged = False
## if the value is an empty string, assign NULL or else round off to 3 decimal digits
if (newHepSpec == ''):
newHepSpec = None
else:
newHepSpec = round((float(newHepSpec)), 3)
if (newMemory == ''):
newMemory = None
else:
newMemory = round((float(newMemory)), 3)
if (newStorage == ''):
newStorage = None
else:
newStorage = round((float(newStorage)), 3)
if (newBandwidth == ''):
newBandwidth = None
else:
newBandwidth = round((float(newBandwidth)), 3)
## check whether at least one field is changed
if ( (currName == newName) and (currHepSpec == newHepSpec) and (currMemory == newMemory) and (currStorage == newStorage) and (currBandwidth == newBandwidth) and (rtNotChanged) ):
if checkForDictionaryEquality(attr_list,old_attr_list):
message = 'No New Value provided for any field to perform Edit Operation. Hence Edit Group Allocation ' + grAllocName + ' aborted'
return HttpResponseRedirect(redirectURL + message)
## if name is changed, validate it and then assign the new name
if (currName != newName):
if (newName == ''):
errorMsg = 'Name field cannot be left blank. So Edit Group Allocation operation stopped'
return HttpResponseRedirect(redirectURL + errorMsg)
nameExists = checkNameIgnoreCase(newName)
if nameExists:
msgAlreadyExists = 'Group Allocation ' + newName + ' already exists. Hence Edit Group Allocation Operation Stopped'
return HttpResponseRedirect(redirectURL + msgAlreadyExists);
grAllocObject.name = newName
#Make Sure no attribute_name or attribute_value is empty
if checkForEmptyStrInList(new_attr_name_list):
errorMessage = 'Attribute Name Cannot be Empty. Hence Update Group Allocation Stopped'
return HttpResponseRedirect(redirectURL + errorMessage)
if checkForEmptyStrInList(new_attr_value_list):
errorMessage = 'Attribute Value Cannot be Empty. Hence Update Group Allocation Stopped'
return HttpResponseRedirect(redirectURL + errorMessage)
##Make Sure that all the attribute_name are distinct
if checkForDuplicateStrInList(new_attr_name_list):
errorMessage = 'Duplicate values for the Attribute Name. Hence Update Group Allocation Stopped'
return HttpResponseRedirect(redirectURL + errorMessage)
## if any of the resource parameter values changed
if ( (currHepSpec != newHepSpec) or (currMemory != newMemory) or (currStorage != newStorage) or (currBandwidth != newBandwidth) ):
## initialize three dict, one each for total, free and used fraction resource parameter values
## get these values from the selected project allocation or parent group allocation
totResources = {'hepspec': None, 'memory': None, 'storage': None, 'bandwidth': None}
freeResources = {'hepspec': None, 'memory': None, 'storage': None, 'bandwidth': None}
usedFraction = {'hepspec': 0, 'memory': 0, 'storage': 0, 'bandwidth': 0 }
parentStr = ''
if (grAllocObject.project_allocation):
parentStr = ' Project Allocation ' + grAllocObject.project_allocation.name
else:
parentStr = ' Parent Group Allocation ' + grAllocObject.parent_group_allocation.name
if (grAllocObject.project_allocation):
## get the resource information of the selected project allocation
errorMessage = prallocgetstats(grAllocObject.project_allocation.name, totResources, freeResources, usedFraction)
if errorMessage != '':
return HttpResponseRedirect(redirectURL + errorMessage)
else:
## get the resource information of the selected parent group allocation
errorMessage = getstats(grAllocObject.parent_group_allocation.name, totResources, freeResources, usedFraction)
if errorMessage != '':
return HttpResponseRedirect(redirectURL + errorMessage)
## calculate how much of project allocated resoures are used for group allocations
grUsedResources = {'hepspec': None, 'memory': None, 'storage': None, 'bandwidth': None}
calGroupUsedResources(grUsedResources, currName)
## check whether any changes to the exist resource values can be met using the project or parent group allocation
## Also, check these changes had any effect on the other group allocations which has used this as their parent
errorMessage = ''
if (currHepSpec != newHepSpec):
if newHepSpec == None:
if (grUsedResources['hepspec'] > 0):
errorMessage = errorMessage + 'Setting the Hepspec UNDEFINED is not possible as there exists alloted Hepspec for other Group allocations using this allocation as Parent '
else:
if currHepSpec == None:
if totResources['hepspec'] == None:
errorMessage = errorMessage + 'The requested Hepspec ' + str(newHepSpec) + ' cannot be fulfilled as Hepspec is UNDEFINED for ' + parentStr
else:
if ( freeResources['hepspec'] < newHepSpec ):
errorMessage = errorMessage + 'The requested Hepspec ' + str(newHepSpec) + ' is more than the free Hepspec available from ' + parentStr
else:
if ( (freeResources['hepspec'] + currHepSpec) < newHepSpec ):
errorMessage = errorMessage + 'The requested Hepspec ' + str(newHepSpec) + ' is more than the free Hepspec available from ' + parentStr
if ((scale is None) and (newHepSpec < grUsedResources['hepspec'])):
errorMessage = errorMessage + 'The requested Hepspec ' + str(newHepSpec) + ' is less than the already alloted Hepspec for other Group Allocations using this allocation as Parent '
if (currMemory != newMemory):
if newMemory == None:
if (grUsedResources['memory'] > 0):
errorMessage = errorMessage + 'Setting the Memory UNDEFINED is not possible as there exists alloted Memory for other Group allocations using this allocation as Parent '
else:
if currMemory == None:
if totResources['memory'] == None:
errorMessage = errorMessage + 'The requested Memory ' + str(newMemory) + ' cannot be fulfilled as Memory is UNDEFINED for ' + parentStr
else:
if ( freeResources['memory'] < newMemory ):
errorMessage = errorMessage + 'The requested Memory ' + str(newMemory) + ' is more than the free Memory available from ' + parentStr
else:
if ( (freeResources['memory'] + currMemory) < newMemory ):
errorMessage = errorMessage + 'The requested Memory ' + str(newMemory) + ' is more than the free Memory available from ' + parentStr
if (newMemory < grUsedResources['memory']):
errorMessage = errorMessage + 'The requested Memory ' + str(newMemory) + ' is less than the already alloted Memory for other Group Allocations using this allocation as Parent '
if (currStorage != newStorage):
if newStorage == None:
if (grUsedResources['storage'] > 0):
errorMessage = errorMessage + 'Setting the Storage UNDEFINED is not possible as there exists alloted Storage for other Group allocations using this allocation as Parent '
else:
if currStorage == None:
if totResources['storage'] == None:
errorMessage = errorMessage + 'The requested Storage ' + str(newStorage) + ' cannot be fulfilled as Storage is UNDEFINED for ' + parentStr
else:
if ( freeResources['storage'] < newStorage ):
errorMessage = errorMessage + 'The requested Storage ' + str(newStorage) + ' is more than the free Storage available from ' + parentStr
else:
if ( (freeResources['storage'] + currStorage) < newStorage ):
errorMessage = errorMessage + 'The requested Storage ' + str(newStorage) + ' is more than the free Storage available from ' + parentStr
if ((storagescale is None)) and (newStorage < grUsedResources['storage']):
errorMessage = errorMessage + 'The requested Storage ' + str(newStorage) + ' is less than the already alloted Storage for other Group allocations using this allocation as Parent '
if (currBandwidth != newBandwidth):
if newBandwidth == None:
if (grUsedResources['bandwidth'] > 0):
errorMessage = errorMessage + 'Setting the Bandwidth UNDEFINED is not possible as there exists alloted Bandwidth for other Group allocations using this allocation as Parent '
else:
if currBandwidth == None:
if totResources['bandwidth'] == None:
errorMessage = errorMessage + 'The requested Bandwidth ' + str(newBandwidth) + ' cannot be fulfilled as Bandwidth is UNDEFINED for ' + parentStr
else:
if ( freeResources['bandwidth'] < newBandwidth ):
errorMessage = errorMessage + 'The requested Bandwidth ' + str(newBandwidth) + ' is more than the free Bandwidth available from ' + parentStr
else:
if ( (freeResources['bandwidth'] + currBandwidth) < newBandwidth ):
errorMessage = errorMessage + 'The requested Bandwidth ' + str(newBandwidth) + ' is more than the free Bandwidth available from ' + parentStr
if (newBandwidth < grUsedResources['bandwidth']):
errorMessage = errorMessage + 'The requested Bandwidth ' + str(newBandwidth) + ' is less than the already alloted Bandwidth for other Group allocations using this allocation as Parent '
if errorMessage != '':
errorMessage = errorMessage + ' Hence Edit Group Allocation Operation Stopped'
return HttpResponseRedirect(redirectURL + errorMessage)
## assign the new values to the project allocation
## if any of the resource values becomes NULL, then to keep the consistency, make all group allocations
## resource value UNDEFINED i.e NULL for that parameter
if (currHepSpec != newHepSpec):
if newHepSpec == None:
GroupAllocation.objects.filter(parent_group_allocation__name = currName).update(hepspec=None)
grAllocObject.hepspec = newHepSpec
if (currMemory != newMemory):
if newMemory == None:
GroupAllocation.objects.filter(parent_group_allocation__name = currName).update(memory=None)
grAllocObject.memory = newMemory
if (currStorage != newStorage):
if newStorage == None:
GroupAllocation.objects.filter(parent_group_allocation__name = currName).update(storage=None)
grAllocObject.storage = newStorage
if (currBandwidth != newBandwidth):
if newBandwidth == None:
GroupAllocation.objects.filter(parent_group_allocation__name = currName).update(bandwidth=None)
grAllocObject.bandwidth = newBandwidth
## save all the changes
if scale is not None:
scalefactor = getScaleFactor(newHepSpec,currHepSpec)
scaleSubGroupAllocationHepSpec(currName,scalefactor,scale=True)
if storagescale is not None:
scalefactor = getScaleFactor(newStorage,currStorage)
scaleSubGroupAllocationStorage(currName,scalefactor,scale=True)
grAllocObject.save()
try:
GroupAllocationMetadata.objects.filter(group_allocation = grAllocObject).delete()
for attr_name,attr_value in attr_list.items():
gralloc_metadata = GroupAllocationMetadata(attribute = attr_name,value = attr_value,group_allocation = grAllocObject)
gralloc_metadata.save()
except Exception :
transaction.rollback()
printStackTrace()
## if the allowed resource type list is changed, then add newly selected or delete the un-selected ones
errorMessage = ''
if not rtNotChanged:
for newRt in newRTList:
if not newRt in grAllocRTList:
try:
rtObject = ResourceType.objects.get(name=newRt)
grrt = GroupAllocationAllowedResourceType(group_allocation=grAllocObject, resource_type=rtObject)
grrt.save()
except ResourceType.DoesNotExist:
transaction.rollback()
errorMessage = errorMessage + 'No Record Found for Resource Type ' + newRt + '. Hence Group Allocation Allowed Resource Types Edit Failed. '
for oldRt in grAllocRTList:
if not oldRt in newRTList:
try:
grrt = GroupAllocationAllowedResourceType.objects.get(resource_type__name=oldRt, group_allocation__name=grAllocName)
grrt.delete()
except GroupAllocationAllowedResourceType.DoesNotExist:
transaction.rollback()
errorMessage = errorMessage + 'No Record Found for Group Allocation Allowed Resource Type ' + oldRt + '. Hence Group Allocation Allowed Resource Types Edit Failed. '
#Write The Log
newgrpAllocInfo = getGroupAllocationInfo(grAllocObject)
objectId = grAllocObject.id
addUpdateLog(request,newName,objectId,comment,oldgrpAllocInfo,newgrpAllocInfo,'groupallocation',True)
## finally, return a successful message to the user
message = 'Group Allocation ' + grAllocName + ' Successfully Updated'
html = "<html><HEAD><meta HTTP-EQUIV=\"REFRESH\" content=\"4; url=/cloudman/groupallocation/list/\"></HEAD><body> %s <br/> %s</body></html>" % (message, errorMessage)
transaction.commit()
return HttpResponse(html)
totalHepSpec = None
totalMemory = None
totalStorage = None
totalBandwidth = None
hepSpecPer = None
memoryPer = None
storagePer = None
bandwidthPer = None
if (grAllocObject.project_allocation):
totalHepSpec = grAllocObject.project_allocation.hepspec
totalMemory = grAllocObject.project_allocation.memory
totalStorage = grAllocObject.project_allocation.storage
totalBandwidth = grAllocObject.project_allocation.bandwidth
if (grAllocObject.hepspec != None ):
if (grAllocObject.hepspec > 0):
hepSpecPer = round(((grAllocObject.hepspec/grAllocObject.project_allocation.hepspec) * 100), 3)
else:
hepSpecPer = 0
if (grAllocObject.memory != None ):
if (grAllocObject.memory > 0):
memoryPer = round(((grAllocObject.memory/grAllocObject.project_allocation.memory) * 100), 3)
else:
memoryPer = 0
if (grAllocObject.storage != None ):
if (grAllocObject.storage > 0) :
storagePer = round(((grAllocObject.storage/grAllocObject.project_allocation.storage) * 100), 3)
else:
storagePer = 0
if (grAllocObject.bandwidth != None ):
if (grAllocObject.bandwidth > 0):
bandwidthPer = round(((grAllocObject.bandwidth/grAllocObject.project_allocation.bandwidth) * 100), 3)
else:
bandwidthPer = 0
else:
totalHepSpec = grAllocObject.parent_group_allocation.hepspec
totalMemory = grAllocObject.parent_group_allocation.memory
totalStorage = grAllocObject.parent_group_allocation.storage
totalBandwidth = grAllocObject.parent_group_allocation.bandwidth
if (grAllocObject.hepspec != None ):
if (grAllocObject.hepspec > 0):
hepSpecPer = round(((grAllocObject.hepspec/grAllocObject.parent_group_allocation.hepspec) * 100), 3)
else:
hepSpecPer = 0
if (grAllocObject.memory != None ):
if (grAllocObject.memory > 0):
memoryPer = round(((grAllocObject.memory/grAllocObject.parent_group_allocation.memory) * 100), 3)
else:
memoryPer = 0
if (grAllocObject.storage != None ):
if (grAllocObject.storage > 0) :
storagePer = round(((grAllocObject.storage/grAllocObject.parent_group_allocation.storage) * 100), 3)
else:
storagePer = 0
if (grAllocObject.bandwidth != None ):
if (grAllocObject.bandwidth > 0):
bandwidthPer = round(((grAllocObject.bandwidth/grAllocObject.parent_group_allocation.bandwidth) * 100), 3)
else:
bandwidthPer = 0
## return to present the update form
return render_to_response('groupallocation/update.html',locals(),context_instance=RequestContext(request))
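## Illustrative sketch (added for clarity; not part of the original cloudman
## module): the percentage blocks above all repeat the same computation,
## share = part / total * 100 rounded to 3 decimals, with 0 and None handled
## separately. A hypothetical helper expressing that pattern:
def _alloc_percentage_example(part, total):
    """Return part as a percentage of total, rounded to 3 decimals (sketch only)."""
    if part is None or total in (None, 0):
        return None
    if part <= 0:
        return 0
    return round((part / total) * 100, 3)
## e.g. _alloc_percentage_example(25.0, 200.0) == 12.5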
def calGroupUsedResources(grUsedResources, currName):
## calculate how much of the project's allocated resources are already allocated to groups
usedHepSpec = 0
usedMemory = 0
usedStorage = 0
usedBandwidth = 0
allGrAllocObjects = GroupAllocation.objects.filter(parent_group_allocation__name = currName)
for oneObject in allGrAllocObjects:
if (oneObject.hepspec != None):
usedHepSpec = usedHepSpec + oneObject.hepspec
if (oneObject.memory != None):
usedMemory = usedMemory + oneObject.memory
if (oneObject.storage != None):
usedStorage = usedStorage + oneObject.storage
if (oneObject.bandwidth != None):
usedBandwidth = usedBandwidth + oneObject.bandwidth
grUsedResources['hepspec'] = round(usedHepSpec, 3)
grUsedResources['memory'] = round(usedMemory, 3)
grUsedResources['storage'] = round(usedStorage, 3)
grUsedResources['bandwidth'] = round(usedBandwidth, 3)
## The following functions are currently unused
def getallowedresourcetypes(request):
#if request.is_ajax():
allocName = request.REQUEST.get("groupallocationname", "")
format = 'json'
mimetype = 'application/javascript'
rtInfo = []
groupAllocationResourceTypeObjects = GroupAllocationAllowedResourceType.objects.filter(group_allocation__name=allocName)
groupAllocationResourceTypeList = list(groupAllocationResourceTypeObjects)
resourceTypeIds = []
resourceTypeObjects = None
for oneRow in groupAllocationResourceTypeObjects:
resourceTypeIds.append(oneRow.resource_type.id)
if len(resourceTypeIds) > 0:
resourceTypeObjects = ResourceType.objects.filter(id__in=resourceTypeIds)
for oneRT in resourceTypeObjects:
rtInfo.append({"pk": oneRT.id, "model": "cloudman.groupallocationallowedresourcetype", "fields": {"name": oneRT.name, "resource_class": oneRT.resource_class, "hepspecs": oneRT.hepspecs, "memory": oneRT.memory, "storage": oneRT.storage, "bandwidth": oneRT.bandwidth}})
data = simplejson.dumps(rtInfo)
return HttpResponse(data,mimetype)
#else:
# return HttpResponse(status=400)
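## For reference (added; the values below are hypothetical): the JSON returned by
## getallowedresourcetypes is a list of serialized resource types of the form
##     [{"pk": 3, "model": "cloudman.groupallocationallowedresourcetype",
##       "fields": {"name": "batch", "resource_class": "CPU", "hepspecs": 8.0,
##                  "memory": 16.0, "storage": 100.0, "bandwidth": 1.0}}]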
def updateAllocationHierarchy(currName, grAllocObject, newHepSpec, newMemory, newStorage, newBandwidth):
allGrAllocObjects = GroupAllocation.objects.filter(parent_group_allocation__name = currName)
oldHepSpec = grAllocObject.hepspec
oldMemory = grAllocObject.memory
oldStorage = grAllocObject.storage
oldBandwidth = grAllocObject.bandwidth
for oneObject in allGrAllocObjects:
if oneObject.hepspec_fraction != None:
if oneObject.hepspec_fraction == 0:
if newHepSpec == None:
oneObject.hepspec_fraction = None
else:
currHepSpec = round(((oneObject.hepspec_fraction * oldHepSpec)/100), 3)
oneObject.hepspec_fraction = round(((currHepSpec/newHepSpec) * 100), 3)
if oneObject.memory_fraction != None:
if oneObject.memory_fraction == 0:
if newMemory == None:
oneObject.memory_fraction = None
else:
currMemory = round(((oneObject.memory_fraction * oldMemory)/100), 3)
oneObject.memory_fraction = round(((currMemory/newMemory) * 100), 3)
if oneObject.storage_fraction != None:
if oneObject.storage_fraction == 0:
if newStorage == None:
oneObject.storage_fraction = None
else:
currStorage = round(((oneObject.storage_fraction * oldStorage)/100), 3)
oneObject.storage_fraction = round(((currStorage/newStorage) * 100), 3)
if oneObject.bandwidth_fraction != None:
if oneObject.bandwidth_fraction == 0:
if newBandwidth == None:
oneObject.bandwidth_fraction = None
else:
currBandwidth = round(((oneObject.bandwidth_fraction * oldBandwidth)/100), 3)
oneObject.bandwidth_fraction = round(((currBandwidth/newBandwidth) * 100), 3)
oneObject.save()
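## Worked example of the rescaling above (added; numbers are illustrative): a child
## holds hepspec_fraction = 50 of a parent whose hepspec changes from 200 to 400.
## Its absolute share is 50 * 200 / 100 = 100, so the stored fraction becomes
## round((100 / 400) * 100, 3) = 25.0, i.e. the child's absolute allocation is preserved.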
def getGroupAllocInGroupAllocation(request):
mimetype = 'application/javascript'
grpAllocName = request.REQUEST.get("name", "")
try:
grpAllocList = GroupAllocation.objects.filter(parent_group_allocation__name = grpAllocName).order_by('name')
groupAllocationInfo = []
for grpAlloc in grpAllocList:
name = grpAlloc.name
group = grpAlloc.group.name
hepspec = displayNone(grpAlloc.hepspec)
memory = displayNone(grpAlloc.memory)
storage = displayNone(grpAlloc.storage)
bandwidth = displayNone(grpAlloc.bandwidth)
groupAllocationInfo.append({'name':name,'group':group,'hepspec':hepspec,'memory':memory,
'storage':storage,'bandwidth':bandwidth})
except Exception:
printStackTrace()
data = simplejson.dumps(groupAllocationInfo)
return HttpResponse(data,mimetype)
def calParentTotResources(parentTotalResources, grAllocObject):
totHepSpec = None
totMemory = None
totStorage = None
totBandwidth = None
if (grAllocObject.project_allocation):
## This means the group has been allocated resources directly from the project
if (grAllocObject.project_allocation.hepspec_fraction != None):
totHepSpec = ( grAllocObject.project_allocation.hepspec_fraction * grAllocObject.project_allocation.top_level_allocation.hepspec)/100
if (grAllocObject.project_allocation.memory_fraction != None):
totMemory = ( grAllocObject.project_allocation.memory_fraction * grAllocObject.project_allocation.top_level_allocation.memory )/100
if (grAllocObject.project_allocation.storage_fraction != None):
totStorage = ( grAllocObject.project_allocation.storage_fraction * grAllocObject.project_allocation.top_level_allocation.storage )/100
if (grAllocObject.project_allocation.bandwidth_fraction != None):
totBandwidth = ( grAllocObject.project_allocation.bandwidth_fraction * grAllocObject.project_allocation.top_level_allocation.bandwidth )/100
else:
## This means a hierarchy has been formed, where this group is assigned resources from another group (which in turn might have been allocated resources from yet another group, and so on)
hepSpecsFractionList = []
memoryFractionList = []
storageFractionList = []
bandwidthFractionList = []
while True:
newGroupAllocationName = grAllocObject.parent_group_allocation.name
groupAllocationObject = None
try:
groupAllocationObject = GroupAllocation.objects.get(name=newGroupAllocationName)
except GroupAllocation.DoesNotExist:
errorMessage = 'Group Allocation Name ' + newGroupAllocationName + ' does not exist'
return errorMessage
if (groupAllocationObject.project_allocation):
break
if groupAllocationObject.hepspec_fraction != None:
totHepSpec = ( groupAllocationObject.hepspec_fraction * (((groupAllocationObject.project_allocation.hepspec_fraction) * (groupAllocationObject.project_allocation.top_level_allocation.hepspec))/100) )/100
for oneValue in reversed(hepSpecsFractionList):
if oneValue == None:
totHepSpec = None
break
totHepSpec = (oneValue * totHepSpec)/100
if groupAllocationObject.memory_fraction != None:
totMemory = ( groupAllocationObject.memory_fraction * (((groupAllocationObject.project_allocation.memory_fraction) * (groupAllocationObject.project_allocation.top_level_allocation.memory))/100) )/100
for oneValue in reversed(memoryFractionList):
if oneValue == None:
totMemory = None
break
totMemory = (oneValue * totMemory)/100
if groupAllocationObject.storage_fraction != None:
totStorage = ( groupAllocationObject.storage_fraction * (((groupAllocationObject.project_allocation.storage_fraction) * (groupAllocationObject.project_allocation.top_level_allocation.storage))/100) )/100
for oneValue in reversed(storageFractionList):
if (oneValue == None):
totStorage = None
break
totStorage = (oneValue * totStorage)/100
if groupAllocationObject.bandwidth_fraction != None:
totBandwidth = ( groupAllocationObject.bandwidth_fraction * (((groupAllocationObject.project_allocation.bandwidth_fraction) * (groupAllocationObject.project_allocation.top_level_allocation.bandwidth))/100) )/100
for oneValue in reversed(bandwidthFractionList):
if (oneValue == None):
totBandwidth = None
break
totBandwidth = (oneValue * totBandwidth)/100
if (totHepSpec != None):
parentTotalResources['hepspec'] = round(totHepSpec, 3)
if (totMemory != None):
parentTotalResources['memory'] = round(totMemory, 3)
if (totStorage != None):
parentTotalResources['storage'] = round(totStorage, 3)
if (totBandwidth != None):
parentTotalResources['bandwidth'] = round(totBandwidth, 3)
def calGrAllocResources(grAllocObject):
if (grAllocObject.project_allocation):
## This means the group has been allocated resources directly from the project
if (grAllocObject.hepspec_fraction != None):
grAllocObject.hepspec = ( grAllocObject.hepspec_fraction * (((grAllocObject.project_allocation.hepspec_fraction) * (grAllocObject.project_allocation.top_level_allocation.hepspec))/100) )/100
if (grAllocObject.memory_fraction != None):
grAllocObject.memory = ( grAllocObject.memory_fraction * (((grAllocObject.project_allocation.memory_fraction) * (grAllocObject.project_allocation.top_level_allocation.memory))/100) )/100
if (grAllocObject.storage_fraction != None):
grAllocObject.storage = ( grAllocObject.storage_fraction * (((grAllocObject.project_allocation.storage_fraction) * (grAllocObject.project_allocation.top_level_allocation.storage))/100) )/100
if (grAllocObject.bandwidth_fraction != None):
grAllocObject.bandwidth = ( grAllocObject.bandwidth_fraction * (((grAllocObject.project_allocation.bandwidth_fraction) * (grAllocObject.project_allocation.top_level_allocation.bandwidth))/100) )/100
else:
## This means a hierarchy has been formed, where this group is assigned resources from another group (which in turn might have been allocated resources from yet another group, and so on)
hepSpecsFractionList = []
memoryFractionList = []
storageFractionList = []
bandwidthFractionList = []
while True:
hepSpecsFractionList.append(grAllocObject.hepspec_fraction)
memoryFractionList.append(grAllocObject.memory_fraction)
storageFractionList.append(grAllocObject.storage_fraction)
bandwidthFractionList.append(grAllocObject.bandwidth_fraction)
newGroupAllocationName = grAllocObject.parent_group_allocation.name
groupAllocationObject = None
try:
groupAllocationObject = GroupAllocation.objects.get(name=newGroupAllocationName)
except GroupAllocation.DoesNotExist:
errorMessage = 'Group Allocation Name ' + newGroupAllocationName + ' does not exist'
return errorMessage
if (groupAllocationObject.project_allocation):
break
if ( (groupAllocationObject.hepspec_fraction != None) and (groupAllocationObject.project_allocation.hepspec_fraction != None) and (groupAllocationObject.project_allocation.top_level_allocation.hepspec != None) ):
grAllocObject.hepspec = ( groupAllocationObject.hepspec_fraction * (((groupAllocationObject.project_allocation.hepspec_fraction) * (groupAllocationObject.project_allocation.top_level_allocation.hepspec))/100) )/100
for oneValue in reversed(hepSpecsFractionList):
if oneValue == None:
grAllocObject.hepspec = None
break
grAllocObject.hepspec = (oneValue * grAllocObject.hepspec)/100
if ( (groupAllocationObject.memory_fraction != None) and (groupAllocationObject.project_allocation.memory_fraction != None) and (groupAllocationObject.project_allocation.top_level_allocation.memory != None) ):
grAllocObject.memory = ( groupAllocationObject.memory_fraction * (((groupAllocationObject.project_allocation.memory_fraction) * (groupAllocationObject.project_allocation.top_level_allocation.memory))/100) )/100
for oneValue in reversed(memoryFractionList):
if oneValue == None:
grAllocObject.memory = None
break
grAllocObject.memory = (oneValue * grAllocObject.memory)/100
if ( (groupAllocationObject.storage_fraction != None) and (groupAllocationObject.project_allocation.storage_fraction != None) and (groupAllocationObject.project_allocation.top_level_allocation.storage != None) ):
grAllocObject.storage = ( groupAllocationObject.storage_fraction * (((groupAllocationObject.project_allocation.storage_fraction) * (groupAllocationObject.project_allocation.top_level_allocation.storage))/100) )/100
for oneValue in reversed(storageFractionList):
if (oneValue == None):
grAllocObject.storage = None
break
grAllocObject.storage = (oneValue * grAllocObject.storage)/100
if ( (groupAllocationObject.bandwidth_fraction != None) and (groupAllocationObject.project_allocation.bandwidth_fraction != None) and (groupAllocationObject.project_allocation.top_level_allocation.bandwidth != None) ):
grAllocObject.bandwidth = ( groupAllocationObject.bandwidth_fraction * (((groupAllocationObject.project_allocation.bandwidth_fraction) * (groupAllocationObject.project_allocation.top_level_allocation.bandwidth))/100) )/100
for oneValue in reversed(bandwidthFractionList):
if (oneValue == None):
grAllocObject.bandwidth = None
break
grAllocObject.bandwidth = (oneValue * grAllocObject.bandwidth)/100
if (grAllocObject.hepspec != None):
temp = round(grAllocObject.hepspec, 3)
grAllocObject.hepspec = temp
if (grAllocObject.memory != None):
temp1 = round(grAllocObject.memory, 3)
grAllocObject.memory = temp1
if (grAllocObject.storage != None):
temp2 = round(grAllocObject.storage, 3)
grAllocObject.storage = temp2
if (grAllocObject.bandwidth != None):
temp3 = round(grAllocObject.bandwidth, 3)
grAllocObject.bandwidth = temp3
def calParentUsedResources(parentUsedResources, grAllocObject):
totalResources = {'hepspec': None, 'memory': None, 'storage': None, 'bandwidth': None}
calParentTotResources(totalResources, grAllocObject)
usedHepSpec = 0
usedMemory = 0
usedStorage = 0
usedBandwidth = 0
allGrAllocObjects = None
if (grAllocObject.project_allocation):
## This means the group has been allocated resources directly from the project
allGrAllocObjects = GroupAllocation.objects.filter(project_allocation__name = grAllocObject.project_allocation.name)
else:
allGrAllocObjects = GroupAllocation.objects.filter(parent_group_allocation__name = grAllocObject.parent_group_allocation.name)
for oneObject in allGrAllocObjects:
if (oneObject.hepspec_fraction != None):
usedHepSpec = usedHepSpec + ( oneObject.hepspec_fraction * totalResources['hepspec'])/100
if (oneObject.memory_fraction != None):
usedMemory = usedMemory + ( oneObject.memory_fraction * totalResources['memory'] )/100
if (oneObject.storage_fraction != None):
usedStorage = usedStorage + ( oneObject.storage_fraction * totalResources['storage'] )/100
if (oneObject.bandwidth_fraction != None):
usedBandwidth = usedBandwidth + ( oneObject.bandwidth_fraction * totalResources['bandwidth'] )/100
parentUsedResources['hepspec'] = round(usedHepSpec, 3)
parentUsedResources['memory'] = round(usedMemory, 3)
parentUsedResources['storage'] = round(usedStorage, 3)
parentUsedResources['bandwidth'] = round(usedBandwidth, 3)
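## Illustrative sketch (added for clarity; not part of the original module): the
## hierarchy walks in calGrAllocResources / calParentTotResources effectively
## multiply percentage fractions down a chain of allocations. With hypothetical
## numbers, a 1000 HS06 top-level allocation, a 40% project fraction and group
## fractions of 50% and 20% give 1000 * 0.40 * 0.50 * 0.20 = 40 HS06:
def _chain_allocation_example(total, fractions):
    """Apply successive percentage fractions to a total (sketch only)."""
    value = total
    for fraction in fractions:
        if fraction is None or value is None:
            return None
        value = (fraction * value) / 100
    return round(value, 3)
## _chain_allocation_example(1000, [40, 50, 20]) == 40.0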
| apache-2.0 | robin-lai/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 |
import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
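# Illustrative note (added): after setup_load_files the layout under
# LOAD_FILES_ROOT is roughly
#
#     LOAD_FILES_ROOT/
#         <TEST_CATEGORY_DIR1>/   # one category containing a single "Hello World!\n" file
#         <TEST_CATEGORY_DIR2>/   # a second, empty category
#
# which is why test_default_load_files below expects 1 filename and 2 target_names.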
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause | rahlk/WarnPlan | warnplan/planners/xtree.py | 1 | 4495 |
"""
XTREE
"""
from __future__ import print_function, division
import os
import sys
# Update path
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
import pandas as pd
from tools import pyC45
from pdb import set_trace
from oracle.smote import SMOTE
from Utils.MiscUtils import flatten
from Utils.ExperimentUtils import Changes
from Utils.FileUtil import list2dataframe
from random import uniform, random as rand
class Patches:
def __init__(i, train, test, trainDF, testDF, tree=None, config=False):
i.train = train
i.trainDF = trainDF
i.test = test
i.testDF = testDF
i.config = config
i.tree = tree
i.change = []
def leaves(i, node):
"""
Returns all terminal nodes.
"""
L = []
if len(node.kids) > 1:
for l in node.kids:
L.extend(i.leaves(l))
return L
elif len(node.kids) == 1:
return [node.kids]
else:
return [node]
def find(i, testInst, t):
if len(t.kids) == 0:
return t
for kid in t.kids:
if i.config:
if kid.val[0] == testInst[kid.f].values[0]:
return i.find(testInst, kid)
else:
try:
if kid.val[0] <= testInst[kid.f].values[0] < kid.val[1]:
return i.find(testInst, kid)
elif kid.val[1] == testInst[kid.f].values[0] == \
i.trainDF.describe()[kid.f]['max']:
return i.find(testInst, kid)
except:
return i.find(testInst, kid)
return t
@staticmethod
def howfar(me, other):
common = [a for a in me.branch if a not in other.branch]
return len(me.branch) - len(common)
def patchIt(i, testInst, config=False):
C = Changes() # Record changes
testInst = pd.DataFrame(testInst).transpose()
current = i.find(testInst, i.tree)
node = current
while node.lvl > -1:
node = node.up # Move to tree root
leaves = flatten([i.leaves(_k) for _k in node.kids])
try:
if i.config:
best = sorted([l for l in leaves if l.score < current.score],
key=lambda F: i.howfar(current, F))[0]
else:
best = \
sorted(
[l for l in leaves if l.score <= 0.01 * current.score],
key=lambda F: i.howfar(current, F))[
1]
except:
return testInst.values.tolist()[0]
def new(old, range):
rad = abs(min(range[1] - old, old - range[1]))
return abs(range[0]), abs(range[1])
# return uniform(range[0], range[1])
for ii in best.branch:
before = testInst[ii[0]]
if not ii in current.branch:
then = testInst[ii[0]].values[0]
now = ii[1] if i.config else new(testInst[ii[0]].values[0],
ii[1])
# print(now)
testInst[ii[0]] = str(now)
# C.save(name=ii[0], old=then, new=now)
testInst[testInst.columns[-1]] = 1
# i.change.append(C.log)
return testInst.values.tolist()[0]
def main(i):
newRows = []
for n in xrange(i.testDF.shape[0]):
if i.testDF.iloc[n][-1] > 0 or i.testDF.iloc[n][-1] == True:
newRows.append(i.patchIt(i.testDF.iloc[n]))
return pd.DataFrame(newRows, columns=i.testDF.columns)
def xtree(train_df, test_df):
"""XTREE"""
if isinstance(train_df, list):
train_df = list2dataframe(train_df) # create a pandas dataframe of training data.dat
if isinstance(test_df, list):
test_df = list2dataframe(test_df) # create a pandas dataframe of testing data.dat
if isinstance(test_df, basestring):
test_df = list2dataframe([test_df]) # create a pandas dataframe of testing data.dat
# train_df = SMOTE(train_df, atleast=1000, atmost=1001)
tree = pyC45.dtree(train_df) # Create a decision tree
patch = Patches(train=None, test=None, trainDF=train_df, testDF=test_df,
tree=tree)
modified = patch.main()
return modified
if __name__ == '__main__':
pass
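# Illustrative usage sketch (added; file names and columns are hypothetical).
# XTREE expects defect-style dataframes whose last column is the class label:
#
#     train = list2dataframe(["data/ant/ant-1.5.csv", "data/ant/ant-1.6.csv"])
#     test = list2dataframe(["data/ant/ant-1.7.csv"])
#     patched = xtree(train, test)
#
# Only rows of `test` predicted/labelled defective are returned, with attribute
# values nudged toward the branch conditions of a better-scoring tree leaf.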
| mit | jakevdp/seaborn | seaborn/rcmod.py | 1 | 15625 |
"""Functions that alter the matplotlib rc dictionary on the fly."""
import numpy as np
import matplotlib as mpl
from . import palettes
_style_keys = (
"axes.facecolor",
"axes.edgecolor",
"axes.grid",
"axes.axisbelow",
"axes.linewidth",
"axes.labelcolor",
"grid.color",
"grid.linestyle",
"text.color",
"xtick.color",
"ytick.color",
"xtick.direction",
"ytick.direction",
"xtick.major.size",
"ytick.major.size",
"xtick.minor.size",
"ytick.minor.size",
"legend.frameon",
"legend.numpoints",
"legend.scatterpoints",
"lines.solid_capstyle",
"image.cmap",
"font.family",
"font.sans-serif",
)
_context_keys = (
"figure.figsize",
"axes.labelsize",
"axes.titlesize",
"xtick.labelsize",
"ytick.labelsize",
"legend.fontsize",
"grid.linewidth",
"lines.linewidth",
"patch.linewidth",
"lines.markersize",
"lines.markeredgewidth",
"xtick.major.width",
"ytick.major.width",
"xtick.minor.width",
"ytick.minor.width",
"xtick.major.pad",
"ytick.major.pad"
)
def set(context="notebook", style="darkgrid", palette="deep",
font="sans-serif", font_scale=1, color_codes=False, rc=None):
"""Set aesthetic parameters in one step.
Each set of parameters can be set directly or temporarily, see the
referenced functions below for more information.
Parameters
----------
context : string or dict
Plotting context parameters, see :func:`plotting_context`
style : string or dict
Axes style parameters, see :func:`axes_style`
palette : string or sequence
Color palette, see :func:`color_palette`
font : string
Font family, see matplotlib font manager.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
rc : dict or None
Dictionary of rc parameter mappings to override the above.
"""
set_context(context, font_scale)
set_style(style, rc={"font.family": font})
set_palette(palette, color_codes=color_codes)
if rc is not None:
mpl.rcParams.update(rc)
def reset_defaults():
"""Restore all RC params to default settings."""
mpl.rcParams.update(mpl.rcParamsDefault)
def reset_orig():
"""Restore all RC params to original settings (respects custom rc)."""
mpl.rcParams.update(mpl.rcParamsOrig)
class _AxesStyle(dict):
"""Light wrapper on a dict to set style temporarily."""
def __enter__(self):
"""Open the context."""
rc = mpl.rcParams
self._orig_style = {k: rc[k] for k in _style_keys}
set_style(self)
return self
def __exit__(self, *args):
"""Close the context."""
set_style(self._orig_style)
class _PlottingContext(dict):
"""Light wrapper on a dict to set context temporarily."""
def __enter__(self):
"""Open the context."""
rc = mpl.rcParams
self._orig_context = {k: rc[k] for k in _context_keys}
set_context(self)
return self
def __exit__(self, *args):
"""Close the context."""
set_context(self._orig_context)
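# Illustrative note (added; not part of the original module): axes_style() and
# plotting_context() below return instances of these wrappers, so either can be
# used in a ``with`` statement to change rc parameters temporarily, e.g.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     with axes_style("whitegrid"), plotting_context("talk", font_scale=1.2):
#         f, ax = plt.subplots()
#         ax.plot(np.arange(10), np.arange(10) ** 2)
#     # previous rcParams are restored when the block exits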
def axes_style(style=None, rc=None):
"""Return a parameter dict for the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
This function returns an object that can be used in a ``with`` statement
to temporarily change the style parameters.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> st = axes_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
>>> import matplotlib.pyplot as plt
>>> with axes_style("white"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_style : set the matplotlib parameters for a seaborn theme
plotting_context : return a parameter dict to scale plot elements
color_palette : define the color palette for a plot
"""
if style is None:
style_dict = {k: mpl.rcParams[k] for k in _style_keys}
elif isinstance(style, dict):
style_dict = style
else:
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
if style not in styles:
raise ValueError("style must be one of %s" % ", ".join(styles))
# Define colors here
dark_gray = ".15"
light_gray = ".8"
# Common parameters
style_dict = {
"text.color": dark_gray,
"axes.labelcolor": dark_gray,
"legend.frameon": False,
"legend.numpoints": 1,
"legend.scatterpoints": 1,
"xtick.direction": "out",
"ytick.direction": "out",
"xtick.color": dark_gray,
"ytick.color": dark_gray,
"axes.axisbelow": True,
"image.cmap": "Greys",
"font.family": ["sans-serif"],
"font.sans-serif": ["Arial", "Liberation Sans",
"Bitstream Vera Sans", "sans-serif"],
"grid.linestyle": "-",
"lines.solid_capstyle": "round",
}
# Set grid on or off
if "grid" in style:
style_dict.update({
"axes.grid": True,
})
else:
style_dict.update({
"axes.grid": False,
})
# Set the color of the background, spines, and grids
if style.startswith("dark"):
style_dict.update({
"axes.facecolor": "#EAEAF2",
"axes.edgecolor": "white",
"axes.linewidth": 0,
"grid.color": "white",
})
elif style == "whitegrid":
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": light_gray,
"axes.linewidth": 1,
"grid.color": light_gray,
})
elif style in ["white", "ticks"]:
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": dark_gray,
"axes.linewidth": 1.25,
"grid.color": light_gray,
})
# Show or hide the axes ticks
if style == "ticks":
style_dict.update({
"xtick.major.size": 6,
"ytick.major.size": 6,
"xtick.minor.size": 3,
"ytick.minor.size": 3,
})
else:
style_dict.update({
"xtick.major.size": 0,
"ytick.major.size": 0,
"xtick.minor.size": 0,
"ytick.minor.size": 0,
})
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _style_keys}
style_dict.update(rc)
# Wrap in an _AxesStyle object so this can be used in a with statement
style_object = _AxesStyle(style_dict)
return style_object
def set_style(style=None, rc=None):
"""Set the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> set_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
See Also
--------
axes_style : return a dict of parameters or use in a ``with`` statement
to temporarily set the style.
set_context : set parameters to scale plot elements
set_palette : set the default color palette for figures
"""
style_object = axes_style(style, rc)
mpl.rcParams.update(style_object)
def plotting_context(context=None, font_scale=1, rc=None):
"""Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
"""
if context is None:
context_dict = {k: mpl.rcParams[k] for k in _context_keys}
elif isinstance(context, dict):
context_dict = context
else:
contexts = ["paper", "notebook", "talk", "poster"]
if context not in contexts:
raise ValueError("context must be in %s" % ", ".join(contexts))
# Set up dictionary of default parameters
base_context = {
"figure.figsize": np.array([8, 5.5]),
"axes.labelsize": 11,
"axes.titlesize": 12,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"legend.fontsize": 10,
"grid.linewidth": 1,
"lines.linewidth": 1.75,
"patch.linewidth": .3,
"lines.markersize": 7,
"lines.markeredgewidth": 0,
"xtick.major.width": 1,
"ytick.major.width": 1,
"xtick.minor.width": .5,
"ytick.minor.width": .5,
"xtick.major.pad": 7,
"ytick.major.pad": 7,
}
# Scale all the parameters by the same factor depending on the context
scaling = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context]
context_dict = {k: v * scaling for k, v in base_context.items()}
# Now independently scale the fonts
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize"]
font_dict = {k: context_dict[k] * font_scale for k in font_keys}
context_dict.update(font_dict)
# Implement hack workaround for matplotlib bug
# See https://github.com/mwaskom/seaborn/issues/344
# There is a bug in matplotlib 1.4.2 that makes points invisible when
# they don't have an edgewidth. It will supposedly be fixed in 1.4.3.
if mpl.__version__ == "1.4.2":
context_dict["lines.markeredgewidth"] = 0.01
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _context_keys}
context_dict.update(rc)
# Wrap in a _PlottingContext object so this can be used in a with statement
context_object = _PlottingContext(context_dict)
return context_object
def set_context(context=None, font_scale=1, rc=None):
"""Set the plotting context parameters.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> set_context("paper")
>>> set_context("talk", font_scale=1.4)
>>> set_context("talk", rc={"lines.linewidth": 2})
See Also
--------
plotting_context : return a dictionary of rc parameters, or use in
a ``with`` statement to temporarily set the context.
set_style : set the default parameters for figure style
set_palette : set the default color palette for figures
"""
context_object = plotting_context(context, font_scale, rc)
mpl.rcParams.update(context_object)
def set_palette(palette, n_colors=None, desat=None, color_codes=False):
"""Set the matplotlib color cycle using a seaborn palette.
Parameters
----------
palette : hls | husl | matplotlib colormap | seaborn color palette
Palette definition. Should be something that :func:`color_palette`
can process.
n_colors : int
Number of colors in the cycle. The default number of colors will depend
on the format of ``palette``, see the :func:`color_palette`
documentation for more information.
desat : float
Proportion to desaturate each color by.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
Examples
--------
>>> set_palette("Reds")
>>> set_palette("Set1", 8, .75)
See Also
--------
color_palette : build a color palette or set the color cycle temporarily
in a ``with`` statement.
set_context : set parameters to scale plot elements
set_style : set the default parameters for figure style
"""
colors = palettes.color_palette(palette, n_colors, desat)
mpl.rcParams["axes.color_cycle"] = list(colors)
mpl.rcParams["patch.facecolor"] = colors[0]
if color_codes:
palettes.set_color_codes(palette)
| bsd-3-clause | Obus/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 |
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row, which is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
Manipulate ``init_centroids_`` throughout rather than ``centroids_`` since
the centroids are just a view of ``init_centroids_``.
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
Manipulate ``init_sq_norm_`` throughout. Similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
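# Clarifying note (added; not in the original source): in merge_subcluster above,
# dot_product = -2 * new_n * new_norm, so
#     sq_radius = new_ss / new_n - new_norm = E[||x||^2] - ||centroid||^2,
# i.e. the mean squared distance of the merged samples from their centroid; the
# merge is accepted only when that is at most threshold ** 2. The radius property
# evaluates the same quantity for a single subcluster, since
# dot(linear_sum_, centroid_) / n_samples_ equals ||centroid_||^2.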
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then merged with the subcluster that has the
centroid closest to the new sample. This is done recursively until it
ends up at the leaf subcluster of the tree with the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
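# Illustrative usage sketch (added; data is hypothetical): the CF-tree can be
# grown incrementally and the global clustering step deferred to the end:
#
#     import numpy as np
#     brc = Birch(n_clusters=3)
#     for chunk in np.array_split(np.random.rand(3000, 2), 10):
#         brc.partial_fit(chunk)   # extends the existing CF-tree
#     brc.partial_fit()            # X=None: run only the global clustering step
#     labels = brc.predict(np.random.rand(5, 2))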
| bsd-3-clause | AnasGhrab/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 |
"""
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
typically return probabilities closer to 0 or 1.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
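# Editor's addition (illustrative sketch, not part of the original example):
# the biased probabilities discussed above can be corrected with
# CalibratedClassifierCV ('sigmoid' / Platt scaling or 'isotonic'), e.g. for
# the LinearSVC model. Parameter choices here are assumptions.
from sklearn.calibration import CalibratedClassifierCV
calibrated_svc = CalibratedClassifierCV(svc, method='sigmoid', cv=3)
calibrated_svc.fit(X_train, y_train)
prob_pos_calibrated = calibrated_svc.predict_proba(X_test)[:, 1]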
|
bsd-3-clause
|
njustesen/coep-starcraft
|
broodwar_strategy_evolver/evolution/plot_unit_stats.py
|
1
|
2842
|
import numpy as np
import matplotlib.pyplot as plt
from broodwar_strategy_evolver.starcraft.unit_repository import UnitRepository
from broodwar_strategy_evolver.starcraft.starcraft import Race, Type
from broodwar_strategy_evolver.starcraft.forward_model import ForwardModel, GameState
from broodwar_strategy_evolver.starcraft.adv_heuristics import heuristic
unit_repo = UnitRepository()
zealot = []
dragoon = []
dark_templar = []
marine = []
firebat = []
medic = []
frame = []
seconds = []
minutes = []
file = open("stats/unit_history_3.dat", 'r')
lines = []
for line in file:
lines = line.split(" ")
for line in lines:
records = line.split(";")
i = -1
for record in records:
if i == -1:
t = int(record.split(';')[0])
frame.append(t / 1000 * 23.81)
seconds.append(t / 1000 / 60)
minutes.append(t / 1000)
else:
if i == unit_repo.get_by_name("Zealot").id:
zealot.append(float(record))
elif i == unit_repo.get_by_name("Dragoon").id:
dragoon.append(float(record))
elif i == unit_repo.get_by_name("Dark Templar").id:
dark_templar.append(float(record))
elif i == unit_repo.get_by_name("Marine").id:
marine.append(float(record))
elif i == unit_repo.get_by_name("Firebat").id:
firebat.append(float(record))
elif i == unit_repo.get_by_name("Medic").id:
medic.append(float(record))
i += 1
#plt.ylim(ymin=min_y)
#plt.ylim(ymax=max_y)
plt.figure(figsize=(10, 2))
plt.xlim(xmax=14000)
plt.xticks(np.arange(0, 14000+1, 1000))
plt.yticks(np.arange(0, 10+1, 2))
plt.ylim(ymin=0)
plt.ylim(ymax=10)
plt.gcf().subplots_adjust(bottom=0.25)
plt.plot(frame, zealot, color="red", label="Protoss Zealots", linewidth=1, linestyle="solid")
plt.fill_between(frame, 0, zealot, facecolor="red", alpha=0.10, linewidth=0.0)
plt.plot(frame, dragoon, color="red", label="Protoss Dragoons", linewidth=2, linestyle="solid")
plt.fill_between(frame, 0, dragoon, facecolor="red", alpha=0.10, linewidth=0.0)
#plt.plot(seconds, dark_templar, color="orange", label="Protoss Dark Templars", linewidth=2, linestyle="dotted")
plt.plot(frame, marine, color="blue", label="Terran Marines", linewidth=1, linestyle="dotted")
plt.fill_between(frame, 0, marine, facecolor="blue", alpha=0.10, linewidth=0.0)
plt.plot(frame, firebat, color="blue", label="Terran Firebats", linewidth=2, linestyle="dashdot")
plt.fill_between(frame, 0, firebat, facecolor="blue", alpha=0.10, linewidth=0.0)
#plt.plot(seconds, medic, color="blue", label="Terran Medics", linewidth=0.5, linestyle="dotted")
plt.ylabel('Unit Count')
plt.xlabel('Frame')
plt.legend(loc='best', frameon=False)
#plt.show()
plt.savefig('stats/unit_count_3.png', dpi=500)
|
gpl-3.0
|
TinyOS-Camp/DDEA-DEV
|
Archive/[14_09_12] DDEA_example_code/plot_csv.py
|
5
|
27192
|
"""
==============================================
Visualizing the energy-sensor-weather structure
==============================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historical weather data.
The fundamental timelet for analysis is 15 min, referred to as Q.
** currently using H (Hour) as the fundamental timelet, need to change later **
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks and the edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
#print(__doc__)
# Author: Deokwooo Jung [email protected]
from __future__ import division # To force floating point division
import os
import sys
import numpy as np
import pylab as pl
from scipy import stats
import matplotlib.pyplot as plt
#from datetime import datetime
import datetime as dt
from dateutil import tz
import shlex, subprocess
import mytool as mt
import time
import retrieve_weather as rw
import itertools
import mpl_toolkits.mplot3d.axes3d as p3
import calendar
from sklearn import cluster, covariance, manifold # Machine Learning Package
###############################################################################
# Constant global variables
###############################################################################
# in seconds
MIN=60; HOUR=60*MIN; DAY=HOUR*24; MONTH=DAY*31
# Hour, Weekday, Day, Month
MIN_IDX=0;HR_IDX=1; WD_IDX=2; MD_IDX=3 ;MN_IDX=4
# Define the period for analysis - year, month, day,hour
# Note: The sample data in the currently downloaded files are from 1 Apr 2013 to
# 30 Nov 2013.
ANS_START_T=dt.datetime(2013,7,1,0)
ANS_END_T=dt.datetime(2013,7,5,0)
#ANS_END_T=dt.datetime(2013,8,30,0)
# Interval of timelet, currently set to 1 Hour
TIMELET_INV=dt.timedelta(hours=1)
# UTC time of weather data
from_zone = tz.gettz('UTC')
# VTT local time
to_zone = tz.gettz('Europe/Helsinki')
# Multi-dimensional lists of hash tables
time_slots=[]
start=ANS_START_T
while start < ANS_END_T:
#print start
time_slots.append(start)
start = start + TIMELET_INV
# Data dictionary
# All sensor and weather data is processed and structured into
# a consistent single data format -- Dictionary
data_dict={}
# This is the list of non-digit symbolic weather data
# The symbolic weather data is such as Conditions (e.g Cloudy or Clear)
# and Events (e.g. Rain or Fog ...)
# That symbolic data is replaced with an integer state representation whose
# pairs are stored in a hash table using Dictionary.
# If no data is given, key value is set to 0.
Conditions_dict={};Conditions_val=[];key_val_c=0
Events_dict={};Events_val=[]; key_val_e=0
Is_CSV=bool(0)
Start_t=time.time()
argv_len=len(sys.argv)
print 'arg length:',argv_len
###############################################################################
# Function
###############################################################################
def daterange(start, stop, step=dt.timedelta(days=1), inclusive=False):
# inclusive=False to behave like range by default
if step.days > 0:
while start < stop:
yield start
start = start + step
# not +=! don't modify object passed in if it's mutable
# since this function is not restricted to
# only types from datetime module
elif step.days < 0:
while start > stop:
yield start
start = start + step
if inclusive and start == stop:
yield start
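# Editor's note: illustrative use of daterange() (added example, not part of
# the original script):
#   list(daterange(dt.datetime(2013, 7, 1), dt.datetime(2013, 7, 3), inclusive=True))
#   -> [datetime(2013,7,1,0,0), datetime(2013,7,2,0,0), datetime(2013,7,3,0,0)]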
###############################################################################
# Retrieve weather data from the internet for the specified periods
# prefix_order=TS (default) [Time][Sensor]
# prefix_order=ST [Sensor][Time]
###############################################################################
"""
def get_weather(t_start, t_end, perfix_order='TS'):
print 'getting weather data '
print 'start time:', t_start, ' ~ end time:',t_end
data_days=[]
for date in daterange(t_start, t_end, inclusive=True):
#print date.strftime("%Y-%m-%d")
temp=date.strftime("%Y,%m,%d").rsplit(',')
data_day=rw.retrieve_data('VTT', int(temp[0]), int(temp[1]), int(temp[2]), view='d')
data_day=data_day.split('\n')
if perfix_order=='TS':
# order by [Sensor][Time]
# Parsing the strings of daily weather data
day_sample_parse=[]
for hour_sample in data_day:
#print hour_sample
day_sample_parse.append(hour_sample.split(','))
data_days.append(day_sample_parse)
else:
# order by [Time][Sensor]
# Parsing the strings of daily weather data
#f=open('weather_data.txt','w')
day_sample_parse=[]
for h_idx,hour_sample in enumerate(data_day):
#print hour_sample
if h_idx==0:
sensor_name_list=hour_sample.split(',')
# f.write(str(sensor_name_list)+'\n')
else:
hour_samples=hour_sample.split(',')
#print hour_samples
#f.write(str(hour_samples)+'\n')
for sample_idx,each_sample in enumerate(hour_samples):
sensor_name=sensor_name_list[sample_idx]
if sensor_name in data_dict:
data_dict[sensor_name].append(each_sample)
else:
data_dict.update({sensor_name:[each_sample]})
if perfix_order=='TS':
return data_days
else:
return sensor_name_list
#f.close()
"""
###############################################################################
# Plotting tool
###############################################################################
def plotting_data(plot_list,opt='val'):
# times is in seconds, but it might not be correct for months with 30 days.
#times_in_secs=(time_val[:,[HR_IDX,MD_IDX,MN_IDX]]*[HOUR,DAY,MONTH]).sum(axis=1)
# Minute,Hour, Weekday, Day, Month - total 5 time fields
time_mat=np.zeros([len(time_slots),5])
for i, time_sample in enumerate(time_slots):
time_mat[i,HR_IDX]=time_sample.hour
time_mat[i,WD_IDX]=time_sample.weekday()
time_mat[i,MD_IDX]=time_sample.day
time_mat[i,MN_IDX]=time_sample.month
monthDict={1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
weekDict={0:'Mon', 1:'Tue', 2:'Wed', 3:'Thur', 4:'Fri', 5:'Sat', 6:'Sun'}
# Month indicator
time_mn_diff=np.diff(time_mat[:,MN_IDX])
m_label_idx=time_mn_diff.nonzero()[0]
m_label_str=[]
for m_num in time_mat[m_label_idx,MN_IDX]:
m_label_str.append(monthDict[m_num])
time_wk_diff=np.diff(time_mat[:,WD_IDX])
w_label_idx=time_wk_diff.nonzero()[0]
w_label_str=[]
for w_num in time_mat[w_label_idx,WD_IDX]:
w_label_str.append(weekDict[int(w_num)])
for k,sensor in enumerate(plot_list):
#print k, sensor
num_samples=[]
mean_samples=[]
for i,(t,samples) in enumerate(zip(time_slots,data_dict[sensor])):
#print i,str(t),len(samples)
num_samples.append(len(samples))
# Mean value with masking
mean_samples.append(np.mean(samples))
#mean_samples.append(np.mean(np.ma.masked_invalid(samples))
#sensor_samples.append(num_samples)
plt.figure(1)
plt.subplot(len(plot_list),1,k+1)
plt.plot(time_slots,num_samples)
plt.title(sensor,fontsize=8)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.ylabel('# Samples/Hour',fontsize=8)
if k<len(plot_list)-1:
frame1 = plt.gca()
frame1.axes.get_xaxis().set_visible(False)
#frame1.axes.get_yaxis().set_visible(False)
plt.figure(2)
plt.subplot(len(plot_list),1,k+1)
plt.plot(time_slots,mean_samples)
plt.title(sensor,fontsize=8)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.ylabel('Avg Val/Hour',fontsize=8)
if k<len(plot_list)-1:
frame1 = plt.gca()
frame1.axes.get_xaxis().set_visible(False)
#frame1.axes.get_yaxis().set_visible(False)
#plt.xticks(w_label_idx.tolist(),w_label_str,fontsize=8)
#plt.text(m_label_idx, np.max(num_samples)*0.8, m_label_str, fontsize=12)
print ' End of Plotting'
return time_mat
###############################################################################
# Parsing sensor data
###############################################################################
def get_val(filename):
if Is_CSV==True:
openfile=open(filename,"r")
sensor_val=[]
time_val=[];
for line in openfile:
tmp=line.rstrip().rsplit(",")
sensor_val.append(float(tmp[1]))
temp=dt.datetime.strptime(tmp[0],"%Y-%m-%d %H:%M:%S")
temp=temp.timetuple()
# Hour, Weekday, Day, Month
time_val.append([temp[3],temp[6],temp[2],temp[1]])
openfile.close()
#print 'list of input csv files: '
else:
data = mt.loadObjectBinary(filename)
sensor_val = data["value"]
time_val = data["ts"]
#print 'list of input bin files: '
return sensor_val,time_val
def get_val_timelet(filename,t_slots):
print ' get_val_timelet'
if Is_CSV==True:
openfile=open(filename,"r")
sensor_val=[]
time_val=[];
for line in openfile:
tmp=line.rstrip().rsplit(",")
sensor_val.append(float(tmp[1]))
temp=dt.datetime.strptime(tmp[0],"%Y-%m-%d %H:%M:%S")
temp=temp.timetuple()
# Hour, Weekday, Day, Month
time_val.append([temp[3],temp[6],temp[2],temp[1]])
openfile.close()
#print 'list of input csv files: '
else:
data = mt.loadObjectBinary(filename)
sensor_val = data["value"]
time_val = data["ts"]
# Create the list of lists
sensor_read=[[] for i in range(len(t_slots))]
for t_sample, v_sample in zip(time_val,sensor_val):
#import pdb; pdb.set_trace()
# If the month is after March, the sample is 2013 data; otherwise assume 2014 data
if t_sample[MN_IDX]>3:
temp_dt=dt.datetime(2013,t_sample[MN_IDX],t_sample[MD_IDX],t_sample[HR_IDX])
else:
temp_dt=dt.datetime(2014,t_sample[MN_IDX],t_sample[MD_IDX],t_sample[HR_IDX])
#print temp_dt
try:
idx=t_slots.index(temp_dt)
sensor_read[idx].append(v_sample)
except ValueError:
idx=-1
return sensor_read, time_val
###############################################################################
# Parsing sensor data
# Data samples are regularized for specified times with timelet
###############################################################################
def symbol_to_state(symbol_list):
#list(itertools.chain(*list_of_lists))
symbol_dict={};symbol_val=[];key_val=1
print 'start'
for i,key_set in enumerate(symbol_list):
symbol_val_let=[]
for key in key_set:
if key not in symbol_dict:
if len(key)==0:
symbol_dict.update({key:0})
symbol_val_let.append(0)
else:
symbol_dict.update({key:key_val})
symbol_val_let.append(key_val)
key_val=key_val+1
else:
symbol_val_let.append(symbol_dict[key])
symbol_val.append(symbol_val_let)
return symbol_val,symbol_dict
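# Editor's note: illustrative behaviour of symbol_to_state() on a hypothetical
# input (added example, not part of the original script):
#   symbol_to_state([['Clear'], ['Rain', 'Clear'], ['']])
#   -> ([[1], [2, 1], [0]], {'Clear': 1, 'Rain': 2, '': 0})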
def get_weather(t_start, t_end, perfix_order='TS'):
print 'getting weather data new '
print 'start time:', t_start, ' ~ end time:',t_end
data_days=[]
# Date iteration given start time and end-time
for date in daterange(t_start, t_end, inclusive=True):
print date.strftime("%Y-%m-%d")
temp=date.strftime("%Y,%m,%d").rsplit(',')
data_day=rw.retrieve_data('VTT', int(temp[0]), int(temp[1]), int(temp[2]), view='d')
data_day=data_day.split('\n')
if perfix_order=='TS':
# order by [Sensor][Time]
# Parsing the strings of daily weather data
day_sample_parse=[]
for hour_sample in data_day:
#print hour_sample
day_sample_parse.append(hour_sample.split(','))
data_days.append(day_sample_parse)
else:
# order by [Time][Sensor]
# Parsing the strings of daily weather data
#f=open('weather_data.txt','w')
day_sample_parse=[]
for h_idx,hour_sample in enumerate(data_day):
#print hour_sample
if h_idx==0:
sensor_name_list=hour_sample.split(',')
# f.write(str(sensor_name_list)+'\n')
else:
hour_samples=hour_sample.split(',')
#print hour_samples
#f.write(str(hour_samples)+'\n')
for sample_idx,each_sample in enumerate(hour_samples):
sensor_name=sensor_name_list[sample_idx]
if sensor_name in data_dict:
data_dict[sensor_name].append(each_sample)
else:
data_dict.update({sensor_name:[each_sample]})
if perfix_order=='TS':
return data_days
else:
return sensor_name_list
def get_weather_timelet(t_slots):
print 'getting weather data new '
t_start=t_slots[0]
t_end=t_slots[-1]
print 'start time:', t_start, ' ~ end time:',t_end
# Date iteration given start time and end-time
# Iterate for each day for all weather data types
for date_idx,date in enumerate(daterange(t_start, t_end, inclusive=True)):
print date.strftime("%Y-%m-%d")
temp=date.strftime("%Y,%m,%d").rsplit(',')
data_day=rw.retrieve_data('VTT', int(temp[0]), int(temp[1]), int(temp[2]), view='d')
# split the daily data into lines (one row per sample)
data_day=data_day.split('\n')
# Iterate for each time index(h_idx) of a day for all weather data types
for h_idx,hour_sample in enumerate(data_day):
hour_samples=hour_sample.split(',')
# Initialize weather data lists of dictionary
# The first row is always the list of weather data types
if (h_idx==0) and (date_idx==0):
sensor_name_list=hour_sample.split(',')
for sample_idx,each_sample in enumerate(hour_samples):
sensor_name=sensor_name_list[sample_idx]
sensor_read=[[] for i in range(len(t_slots))]
data_dict.update({sensor_name:sensor_read})
elif h_idx>0:
# 'DateUTC' is the one
sample_DateUTC=hour_samples[sensor_name_list.index('DateUTC')]
# convert UTC time to VTT local time.
utc_dt=dt.datetime.strptime(sample_DateUTC, "%Y-%m-%d %H:%M:%S")
vtt_dt_aware = utc_dt.replace(tzinfo=from_zone).astimezone(to_zone)
# convert to offset-naive from offset-aware datetimes
vtt_dt=dt.datetime(*(vtt_dt_aware.timetuple()[:4]))
# time slot index a given weather sample time
try:
vtt_dt_idx=t_slots.index(vtt_dt)
for sample_idx,each_sample in enumerate(hour_samples):
# convert string type to float if possible
try:
each_sample=float(each_sample)
except ValueError:
each_sample=each_sample
sensor_name=sensor_name_list[sample_idx]
#import pdb; pdb.set_trace()
if sensor_name in data_dict:
if each_sample!='N/A' and each_sample!=[]:
data_dict[sensor_name][vtt_dt_idx].append(each_sample)
else:
raise NameError('Inconsistency in the list of weather data')
except ValueError:
vtt_dt_idx=-1
else:
# hour_sample is the list of weather field names, discard
hour_sample=[]
return sensor_name_list
def data_dict_purge(purge_list):
for key in purge_list:
print 'purge', key
if key in data_dict.keys():
data_dict.pop(key,None)
#data_dict_purge(weather_list)
###############################################################################
# Reading sensor data from CSV or BIN files - use linux commands
###############################################################################
input_csvs=[]
num_csvs=[]
if argv_len==1:
if Is_CSV==True:
temp = subprocess.check_output("ls *.csv |grep _ACTIVE_POWER_", shell=True)
else:
temp = subprocess.check_output("ls *.bin |grep _ACTIVE_POWER_", shell=True)
input_csvs =shlex.split(temp)
plt.ion()
print 'argv 1'
elif argv_len>1:
input_csvs=sys.argv[1:]
print 'getting args'
else:
input_csvs=[]
print '...'
num_csvs=len(input_csvs)
num_col_subplot=np.ceil(np.sqrt(num_csvs))
###############################################################################
# Analysis script starts here ....
# List of sensors from BMS
print 'mapping sensor list into hashing table using dictionary'
sensor_list=input_csvs
# List of sensors from Weather data
# getting weather files
# Weather parameter list
#['TimeEEST', 'TemperatureC', 'Dew PointC', 'Humidity',
# 'Sea Level PressurehPa', 'VisibilityKm', 'Wind Direction',
# 'Wind SpeedKm/h', 'Gust SpeedKm/h', 'Precipitationmm',
# 'Events', 'Conditions', 'WindDirDegrees', 'DateUTC']
# Note: We select 'TemperatureC', 'Dew PointC', 'Humidity',
# 'Events', 'Conditions' as the main weather parameters
#weather_list=get_weather(ANS_START_T, ANS_END_T,'ST')
# Checking length of weather sample data
print "lenth of dictionary"
for key in data_dict.keys():
print 'len of ', key, len(data_dict[key])
# data dictionary that maps all types of sensor readings into a single hash table
###############################################################################
# Read out all sensor files in the file list
time_set_temp=[]
for i,argv in enumerate(sensor_list):
print 'index ',i+1,': ', argv
# sensor value is read by time
start__dictproc_t=time.time()
dict_sensor_val, dict_time_val=get_val_timelet(argv,time_slots)
data_dict.update({argv:dict_sensor_val})
end__dictproc_t=time.time()
print argv,'- dict.proc time is ', end__dictproc_t-start__dictproc_t
print 'Check sample density over time slots'
time_mat=plotting_data(sensor_list[0:2])
"""
weather_list -that is pretty much fixed from database
(*) is the data to be used for our analysis
0 TimeEEST
1 TemperatureC (*)
2 Dew PointC (*)
3 Humidity (*)
4 Sea Level PressurehPa
5 VisibilityKm
6 Wind Direction
7 Wind SpeedKm/h
8 Gust SpeedKm/h
9 Precipitationmm
10 Events (*)
11 Conditions (*)
12 WindDirDegrees
13 DateUTC
"""
weather_list=get_weather_timelet(time_slots)
# Convert symbols to integer representation
data_dict['Conditions'],Conditions_dict=symbol_to_state(data_dict['Conditions'])
data_dict['Events'],Events_dict=symbol_to_state(data_dict['Events'])
# Weather data to be used
weather_list_used = [weather_list[i] for i in [1,2,3,10,11]]
# All (sensor + weather) data to be used
data_used=weather_list_used + sensor_list
def verify_data_format(key_list):
# Verify there is no [] or N/A in the list
print 'Checking any inconsistent data format.....'
print '---------------------------------'
list_of_wrong_data_format=[]
for key in key_list:
print 'checking ', key, '...'
for i,samples in enumerate(data_dict[key]):
for j,each_sample in enumerate(samples):
if each_sample==[]:
list_of_wrong_data_format.append([key,i,j])
print each_sample, 'at', time_slots[j], 'in', key
elif (isinstance(each_sample,int)==False and isinstance(each_sample,float)==False):
list_of_wrong_data_format.append([key,i,j])
print each_sample, 'at', time_slots[j], 'in', key
print '---------------------------------'
if len(list_of_wrong_data_format)==0:
print ' no inconsistent data format'
return list_of_wrong_data_format
# Verify there is no [] or N/A in the list
list_of_wrong_data_format=verify_data_format(data_used)
if len(list_of_wrong_data_format)!=0:
raise NameError('Inconsistent data format in the list of data_used')
# Weighted average to impute missing values
# Imputing missing data using weighted mean values
hr_set=time_mat[:,HR_IDX].astype(int)
wd_set=time_mat[:,WD_IDX].astype(int)
day_set=time_mat[:,MD_IDX].astype(int)
mn_set=time_mat[:,MN_IDX].astype(int)
cumnum_days_mn=np.r_[0,np.array([calendar.monthrange(2013, i)[1] for i in np.r_[1:12]]).cumsum()]
daycount_set=[ int(day+cumnum_days_mn[mn-1]) for i,(day,mn) in enumerate(zip(day_set,mn_set))]
# X.shape (1258, 7)
# type(X) <type 'numpy.ndarray'>
# type(X) <type 'numpy.ndarray'>
num_of_data=len(data_used)
num_of_samples=len(time_slots)
X=np.zeros([num_of_samples,num_of_data])
INT_type_cols=[]
FLOAT_type_cols=[]
for j,key in enumerate(data_used):
for i,sample in enumerate(data_dict[key]):
if len(sample)==0:
X[i,j]=np.infty
elif isinstance(sample[0],int):
X[i,j]=int(stats.mode(sample)[0])
if i==0: INT_type_cols.append(j)
elif isinstance(sample[0],float):
X[i,j]=np.mean(sample)
if i==0: FLOAT_type_cols.append(j)
else:
raise NameError('Sample type must be either INT or FLOAT')
# If no data is available, impute it with a weighted mean (see below)
print 'Before imputation'
for i,key in enumerate(data_used):
print key
print [k for k in np.nonzero(X[:,i]==np.infty)[0]]
# If no data is available, impute the value with a weighted mean
for i,key in enumerate(data_used):
for inf_idx in np.nonzero(X[:,i]==np.infty)[0]:
whgt_bottom_sum=0;whgt_top_sum=0
for h_idx in np.nonzero(hr_set==hr_set[inf_idx])[0]:
#import pdb; pdb.set_trace()
sample_temp=X[h_idx,i]
if (sample_temp<np.infty and h_idx!=inf_idx):
wght=1/np.abs(daycount_set[h_idx]-daycount_set[inf_idx])
whgt_bottom_sum=whgt_bottom_sum+wght
whgt_top_sum=whgt_top_sum+wght*sample_temp
new_sample=whgt_top_sum/whgt_bottom_sum
X[inf_idx,i]=new_sample
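# Editor's note (worked example of the weighting above, illustrative only):
# if the slot at day 5 of a given hour is missing while the same hour has
# values x3 on day 3 and x7 on day 7, the weights are 1/|3-5| = 0.5 and
# 1/|7-5| = 0.5, so the imputed value is (0.5*x3 + 0.5*x7) / (0.5 + 0.5).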
# After imputation, verify that no infinities remain
print 'After imputation'
for i,key in enumerate(data_used):
print key
print [k for k in np.nonzero(X[:,i]==np.infty)[0]]
# Split columns into integer-coded and real-valued blocks
X_INT=X[:,INT_type_cols]
X_FLOAT=X[:,FLOAT_type_cols]
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
edge_model.fit(X_FLOAT)
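# Editor's addition: a hedged sketch of the affinity-propagation clustering and
# 2D embedding steps described in the module docstring but not implemented below.
# It follows the scikit-learn stock-market example; parameter choices are
# assumptions, and LocallyLinearEmbedding needs n_neighbors < number of
# float-valued data streams in X_FLOAT.
_, ap_labels = cluster.affinity_propagation(edge_model.covariance_)
float_stream_names = np.array(data_used)[FLOAT_type_cols]
for ap_i in range(ap_labels.max() + 1):
    print 'Cluster %i: %s' % (ap_i + 1, ', '.join(float_stream_names[ap_labels == ap_i]))
node_position_model = manifold.LocallyLinearEmbedding(n_components=2,
                                                      eigen_solver='dense',
                                                      n_neighbors=3)
embedding = node_position_model.fit_transform(X_FLOAT.T).T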
# Using mode if integer type, using mean if real type
"""
vak1_power_sys_sum=[]
vak1_power_p1_sum=[]
vak1_power_p2_sum=[]
vak1_power_p3_sum=[]
for i,(psys,p1,p2,p3) in enumerate(zip(vak1_power_sys,vak1_power_p1,vak1_power_p2,vak1_power_p3)):
vak1_power_sys_sum.append(sum(psys))
vak1_power_p1_sum.append(sum(p1))
vak1_power_p2_sum.append(sum(p2))
vak1_power_p3_sum.append(sum(p3))
plt.subplot(2,1,1)
plt.plot(vak1_power_sys_sum)
plt.plot(np.array(vak1_power_p1_sum)+np.array(vak1_power_p2_sum)+np.array(vak1_power_p3_sum),'-s')
plt.subplot(2,1,2)
plt.plot(vak1_power_p1_sum,'-*')
plt.plot(vak1_power_p2_sum,'-s')
plt.plot(vak1_power_p3_sum,'-o')
"""
# Using the following weather data for variables
#
# Regularize the weather data into a single time reference
# For symbolic data, use mode, and for real number data, use average
# Gaussian Process (GP) model and interpolation for power consumption data
#Conditions_dict,Events_dict
"""
3D plotting
fig=pl.figure()
ax = p3.Axes3D(fig)
ax.scatter(gw2_power_p1_sum.T, gw2_power_p2_sum, gw2_power_p3_sum, c=colors)
ax.set_xlabel('P1')
ax.set_ylabel('P2')
ax.set_zlabel('P3')
fig.add_axes(ax)
"""
if argv_len>1:
print 'end of program'
plt.show()
|
gpl-2.0
|
wileeam/airflow
|
scripts/perf/scheduler_ops_metrics.py
|
4
|
7153
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
import pandas as pd
from airflow import settings
from airflow.configuration import conf
from airflow.jobs import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils import timezone
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
1. Queuing delay - time taken from starting the executor until the task
instance is added to the executor queue.
2. Start delay - time taken from starting the executor until the task instance
starts execution.
3. Land time - time taken from starting the executor to task instance
completion.
4. Duration - time taken for executing the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py [timeout]
You can specify timeout in seconds as an optional parameter.
Its default value is 6 seconds.
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerMetricsJob'
}
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = [x for x in tis if x.state == State.SUCCESS]
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super().heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
# the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(timezone.utcnow() - task.start_date).days
for dag in dags for task in dag.tasks])
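# Editor's note (illustrative arithmetic, not part of the original script):
# with 2 DAGs of 3 daily tasks each, whose start_date lies 5 days in the past,
# num_task_instances == 2 * 3 * 5 == 30 expected task instances.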
if (len(successful_tis) == num_task_instances or
(timezone.utcnow() - self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: %s', dr)
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: %s', ti)
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: %s is_paused=%s', dm, is_paused)
dm.is_paused = is_paused
session.commit()
def main():
global MAX_RUNTIME_SECS
if len(sys.argv) > 1:
try:
max_runtime_secs = int(sys.argv[1])
if max_runtime_secs < 1:
raise ValueError
MAX_RUNTIME_SECS = max_runtime_secs
except ValueError:
logging.error('Specify a positive integer for timeout.')
sys.exit(1)
conf.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)
job.run()
if __name__ == "__main__":
main()
|
apache-2.0
|
jamesp/Isca
|
src/extra/python/scripts/modified_time_script.py
|
1
|
2922
|
import os
import numpy as np
import matplotlib.pyplot as plt
import pdb
import sys
from datetime import datetime
def calculate_month_run_time(exp_dir_list, plot_against_wall_time=True, file_to_use_for_timing = 'logfile.000000.out'):
"""A script that takes a list of experiment names as input, and plots the time taken to run each month in that experiment vs the wall time. """
try:
GFDL_DATA = os.environ['GFDL_DATA']
except Exception as e:
print('Environment variable GFDL_DATA must be set')
sys.exit(0)
for exp_dir in exp_dir_list:
exp_dir_full = GFDL_DATA+'/'+exp_dir+'/'
#Finds all the months for particular experiment
months_to_check=os.listdir(exp_dir_full)
try:
months_to_check.remove('restarts')
except:
pass
months_to_check.sort()
try:
months_to_check.remove('.DS_Store')
months_to_check.remove('._.DS_Store')
except ValueError:
pass
delta_t_arr=np.zeros(len(months_to_check)-1)
end_t_arr=[]
for month in np.arange(len(months_to_check)-1)+1:
#Calculates the time between the current month's folder being modified, and the previous month's folder being modified.
delta_t = os.path.getctime(exp_dir_full+months_to_check[month]+'/'+file_to_use_for_timing)-os.path.getctime(exp_dir_full+months_to_check[month-1]+'/'+file_to_use_for_timing)
#Converts this time to minutes from seconds:
delta_t_arr[month-1]=delta_t/60.
#Saves time of modification as python datetime object:
end_t_arr.append(datetime.fromtimestamp(os.path.getmtime(exp_dir_full+months_to_check[month])))
month_num_arr = [int(months_to_check[num].replace('run', '')) for num in range(len(months_to_check))]
months_idx_to_remove = [num for num in np.where(np.abs(delta_t_arr) > 10.*np.mean(delta_t_arr))[0]]
print(('removing anomalously long delta_t for months ', [month_num_arr[month] for month in months_idx_to_remove], [delta_t_arr[month] for month in months_idx_to_remove]))
delta_t_arr[np.where(np.abs(delta_t_arr) > 10.*np.mean(delta_t_arr))] = np.nan
#Plots results for particular experiment
if plot_against_wall_time:
plt.plot(end_t_arr,delta_t_arr, label=exp_dir)
plt.xlabel('Wall time (GMT)')
else:
plt.plot(month_num_arr[:-1], delta_t_arr, label=exp_dir)
plt.xlabel('Month number')
plt.legend()
plt.ylabel('Wall time elapsed per month (minutes)')
if __name__=="__main__":
exp_dir_list = ['bog_fixed_sst_low_bog_a_ocean_topog_85']
calculate_month_run_time(exp_dir_list, plot_against_wall_time=False, file_to_use_for_timing='git_hash_used.txt')
plt.show()
|
gpl-3.0
|
fulmicoton/pylearn2
|
pylearn2/train_extensions/tests/test_wmape_channel.py
|
32
|
2531
|
"""
Tests for WMAPE.
"""
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
from theano.compile import function
import numpy as np
from numpy.testing import assert_allclose
def test_wmape():
"""Test WMapeChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
X = trainer.model.get_input_space().make_theano_batch()
Y = trainer.model.fprop(X)
f = function([X], Y, allow_input_downcast=True)
y_hat = f(trainer.dataset.X)
wmape_num_exp = abs(trainer.dataset.y - y_hat).sum()
wmape_den_exp = abs(trainer.dataset.y).sum()
exp_array = np.asarray([wmape_num_exp, wmape_den_exp])
wmape_num_real = trainer.model.monitor.channels['train_wmape_num'].\
val_record
wmape_den_real = trainer.model.monitor.channels['train_wmape_den'].\
val_record
real_array = np.asarray([wmape_num_real[-1], wmape_den_real[-1]])
assert_allclose(exp_array, real_array)
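# Editor's note (added comment, not part of the original test): the two channels
# compared above combine into the weighted mean absolute percentage error as
# WMAPE = sum(|y - y_hat|) / sum(|y|).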
test_yaml = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.\
random_dense_design_matrix_for_regression
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
reg_min: 1,
reg_max: 1000
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Linear {
layer_name: y,
dim: 1,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_wmape_num,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.wmape_channel.WMapeNumeratorChannel {},
!obj:pylearn2.train_extensions.wmape_channel.\
WMapeDenominatorChannel {},
],
}
"""
|
bsd-3-clause
|
bertrand-l/numpy
|
numpy/lib/recfunctions.py
|
5
|
34779
|
"""
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
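# Editor's note: illustrative behaviour of zip_descr (added example, not part of
# the original module; exact integer width may vary by platform):
#   >>> zip_descr((np.empty((1,), dtype=[('A', '<i4')]),
#   ...            np.empty((1,), dtype='<f8')), flatten=True)
#   [('A', '<i4'), ('f1', '<f8')]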
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
Whether to collapse nested fields.
"""
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
if sys.version_info[0] >= 3:
zip_longest = itertools.zip_longest
else:
zip_longest = itertools.izip_longest
for tup in zip_longest(*seqarrays, fillvalue=fill_value):
yield tuple(zipfunc(tup))
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something
depending on what its corresponding type is:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
        Name of the field along which to check for duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
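# A hedged usage sketch of `find_duplicates` (hypothetical data, comments only):
# >>> import numpy as np
# >>> from numpy.lib import recfunctions as rfn
# >>> a = np.ma.array([1, 1, 2, 3, 3]).view([('a', int)])
# >>> rfn.find_duplicates(a, key='a')
# # -> the entries whose 'a' value occurs more than once (here 1 and 3)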
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
    to the fields used to join the arrays. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
        `asrecarray==True`) or an ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
            names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
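# A hedged usage sketch of `join_by` (hypothetical arrays, comments only):
# >>> import numpy as np
# >>> from numpy.lib import recfunctions as rfn
# >>> r1 = np.array([(1, 10.), (2, 20.), (3, 30.)], dtype=[('key', int), ('x', float)])
# >>> r2 = np.array([(2, 200.), (4, 400.)], dtype=[('key', int), ('y', float)])
# >>> rfn.join_by('key', r1, r2, jointype='inner', usemask=False)
# # -> only key 2 survives, with fields 'key', 'x' and 'y'
# >>> rfn.join_by('key', r1, r2, jointype='outer')
# # -> all keys kept, with the missing 'x'/'y' values masked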
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
|
bsd-3-clause
|
lele94218/social_network_paper
|
facebook/getdata/clustering_example.py
|
1
|
1149
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 08 19:56:28 2014
@author: Keine
"""
import sqlite3
cx = sqlite3.connect("../text2DB/get_data.db")
distxy = [([0.0] * 49) for i in range(49)]
cu = cx.cursor()
for i in range(1, 50):
for j in range(1, 50):
if i == j:
distxy[i-1][j-1] = 0.0
else:
sql = cu.execute("""select similarity from user_similarity where id1 = ? and id2 = ?""", (i,j))
sim = 1.0 - float(sql.fetchone()[0]) - 0.98
distxy[i-1][j-1] = sim
cx.close()
#print distxy[49-1][48-1]
#from scipy.cluster.hierarchy import linkage, dendrogram
#R = dendrogram(linkage(distxy, method='complete'))
#suptitle('Cluster Dendrogram', fontweight='bold', fontsize=14);
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
data_dist = pdist(distxy) # computing the distance
data_link = linkage(data_dist) # computing the linkage
dendrogram(data_link)
plt.xlabel('User_ID')
plt.ylabel('Similarity ratio')
plt.suptitle('Hierarchy Clustering', fontweight='bold', fontsize=14)
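# Hedged extension (not part of the original script): flat cluster labels could
# be derived from the same linkage, e.g. splitting the 49 users into 4 groups:
# >>> from scipy.cluster.hierarchy import fcluster
# >>> labels = fcluster(data_link, t=4, criterion='maxclust')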
|
gpl-2.0
|
paalge/scikit-image
|
doc/examples/filters/plot_entropy.py
|
9
|
2234
|
"""
=======
Entropy
=======
In information theory, information entropy is the log-base-2 of the number of
possible outcomes for a message.
For an image, local entropy is related to the complexity contained in a given
neighborhood, typically defined by a structuring element. The entropy filter can
detect subtle variations in the local gray level distribution.
In the first example, the image is composed of two surfaces with two slightly
different distributions. The image has a uniform random distribution in the
range [-14, +14] in the middle of the image and a uniform random distribution in
the range [-15, 15] at the image borders, both centered at a gray value of 128.
To detect the central square, we compute the local entropy measure using a
circular structuring element of a radius big enough to capture the local gray
level distribution. The second example shows how to detect texture in the camera
image using a smaller structuring element.
"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk
# First example: object detection.
noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30
noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
noise_mask).astype(np.uint8)
img = noise + 128
entr_img = entropy(img, disk(10))
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(8, 3))
ax0.imshow(noise_mask, cmap=plt.cm.gray)
ax0.set_xlabel("Noise mask")
ax1.imshow(img, cmap=plt.cm.gray)
ax1.set_xlabel("Noisy image")
ax2.imshow(entr_img)
ax2.set_xlabel("Local entropy")
fig.tight_layout()
# Second example: texture detection.
image = img_as_ubyte(data.camera())
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 4), sharex=True,
sharey=True,
subplot_kw={"adjustable": "box-forced"})
img0 = ax0.imshow(image, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
fig.colorbar(img0, ax=ax0)
img1 = ax1.imshow(entropy(image, disk(5)), cmap=plt.cm.gray)
ax1.set_title("Entropy")
ax1.axis("off")
fig.colorbar(img1, ax=ax1)
fig.tight_layout()
plt.show()
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/examples/axes_grid/demo_curvelinear_grid.py
|
16
|
4116
|
import numpy as np
#from matplotlib.path import Path
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist import SubplotHost, \
ParasiteAxesAuxTrans
def curvelinear_test1(fig):
"""
grid for custom transform.
"""
def tr(x, y):
x, y = np.asarray(x), np.asarray(y)
return x, y-x
def inv_tr(x,y):
x, y = np.asarray(x), np.asarray(y)
return x, y+x
grid_helper = GridHelperCurveLinear((tr, inv_tr))
ax1 = Subplot(fig, 1, 2, 1, grid_helper=grid_helper)
# ax1 will have a ticks and gridlines defined by the given
# transform (+ transData of the Axes). Note that the transform of
# the Axes itself (i.e., transData) is not affected by the given
# transform.
fig.add_subplot(ax1)
xx, yy = tr([3, 6], [5.0, 10.])
ax1.plot(xx, yy)
ax1.set_aspect(1.)
ax1.set_xlim(0, 10.)
ax1.set_ylim(0, 10.)
ax1.axis["t"]=ax1.new_floating_axis(0, 3.)
ax1.axis["t2"]=ax1.new_floating_axis(1, 7.)
ax1.grid(True)
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    # The polar projection, which involves a cycle and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are a bit different from
    # mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but may be possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 2, 2, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
    # let the right axis show ticklabels for the 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
    # let the bottom axis show ticklabels for the 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if 1:
fig = plt.figure(1, figsize=(7, 4))
fig.clf()
curvelinear_test1(fig)
curvelinear_test2(fig)
plt.draw()
plt.show()
|
mit
|
krikru/tensorflow-opencl
|
tensorflow/tools/dist_test/python/census_widendeep.py
|
54
|
11900
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
      continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
else:
      urllib.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
else:
      urllib.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols.items() + categorical_cols.items())
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the cesnsus data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
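# Hedged usage note (not part of the original script): a single local run might
# look like
#   python census_widendeep.py --data_dir=/tmp/census-data --train_steps=100
# while a distributed run would additionally point --master_grpc_url at a
# running tf.train.Server and set --num_parameter_servers / --worker_index.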
|
apache-2.0
|
GuessWhoSamFoo/pandas
|
pandas/tests/frame/test_convert_to.py
|
1
|
21707
|
# -*- coding: utf-8 -*-
import collections
from collections import OrderedDict, defaultdict
from datetime import datetime
import numpy as np
import pytest
import pytz
from pandas.compat import long
from pandas import (
CategoricalDtype, DataFrame, MultiIndex, Series, Timestamp, compat,
date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
class TestDataFrameConvertTo(TestData):
def test_to_dict_timestamp(self):
# GH11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp('20130101')
test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
expected_records = [{'A': tsmp, 'B': tsmp},
{'A': tsmp, 'B': tsmp}]
expected_records_mixed = [{'A': tsmp, 'B': 1},
{'A': tsmp, 'B': 2}]
assert (test_data.to_dict(orient='records') ==
expected_records)
assert (test_data_mixed.to_dict(orient='records') ==
expected_records_mixed)
expected_series = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([tsmp, tsmp], name='B'),
}
expected_series_mixed = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([1, 2], name='B'),
}
tm.assert_dict_equal(test_data.to_dict(orient='series'),
expected_series)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
expected_series_mixed)
expected_split = {
'index': [0, 1],
'data': [[tsmp, tsmp],
[tsmp, tsmp]],
'columns': ['A', 'B']
}
expected_split_mixed = {
'index': [0, 1],
'data': [[tsmp, 1],
[tsmp, 2]],
'columns': ['A', 'B']
}
tm.assert_dict_equal(test_data.to_dict(orient='split'),
expected_split)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
expected_split_mixed)
def test_to_dict_index_not_unique_with_index_orient(self):
# GH22801
# Data loss when indexes are not unique. Raise ValueError.
df = DataFrame({'a': [1, 2], 'b': [0.5, 0.75]}, index=['A', 'A'])
pytest.raises(ValueError, df.to_dict, orient='index')
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
pytest.raises(ValueError, df.to_dict, orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
# convert_datetime64 defaults to None
expected = df.index.values[0]
result = df.to_records()['index'][0]
assert expected == result
# check for FutureWarning if convert_datetime64=False is passed
with tm.assert_produces_warning(FutureWarning):
expected = df.index.values[0]
result = df.to_records(convert_datetime64=False)['index'][0]
assert expected == result
# check for FutureWarning if convert_datetime64=True is passed
with tm.assert_produces_warning(FutureWarning):
expected = df.index[0]
result = df.to_records(convert_datetime64=True)['index'][0]
assert expected == result
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
assert 'bar' in r
assert 'one' not in r
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
compat.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <[email protected]>\n'
'To: <[email protected]>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
all(x in frame for x in ['Type', 'Subject', 'From'])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
assert 'X' in rs.dtype.fields
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
assert 'index' in rs.dtype.fields
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
assert 'level_0' in rs.dtype.fields
def test_to_records_with_unicode_index(self):
# GH13172
# unicode_literals conflict with to_records
result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a') \
.to_records()
expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])
tm.assert_almost_equal(result, expected)
def test_to_records_with_unicode_column_names(self):
# xref issue: https://github.com/numpy/numpy/issues/2407
# Issue #11879. to_records used to raise an exception when used
# with column names containing non-ascii characters in Python 2
result = DataFrame(data={u"accented_name_é": [1.0]}).to_records()
# Note that numpy allows for unicode field names but dtypes need
# to be specified using dictionary instead of list of tuples.
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", u"accented_name_é"],
"formats": ['=i8', '=f8']}
)
tm.assert_almost_equal(result, expected)
def test_to_records_with_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("kwargs,expected", [
# No dtypes --> default to array dtypes.
(dict(),
np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "<i8"), ("A", "<i8"),
("B", "<f8"), ("C", "O")])),
# Should have no effect in this case.
(dict(index=True),
np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "<i8"), ("A", "<i8"),
("B", "<f8"), ("C", "O")])),
# Column dtype applied across the board. Index unaffected.
(dict(column_dtypes="<U4"),
np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U4"),
("B", "<U4"), ("C", "<U4")])),
# Index dtype applied across the board. Columns unaffected.
(dict(index_dtypes="<U1"),
np.rec.array([("0", 1, 0.2, "a"), ("1", 2, 1.5, "bc")],
dtype=[("index", "<U1"), ("A", "<i8"),
("B", "<f8"), ("C", "O")])),
# Pass in a type instance.
(dict(column_dtypes=np.unicode),
np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U"),
("B", "<U"), ("C", "<U")])),
# Pass in a dtype instance.
(dict(column_dtypes=np.dtype('unicode')),
np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U"),
("B", "<U"), ("C", "<U")])),
# Pass in a dictionary (name-only).
(dict(column_dtypes={"A": np.int8, "B": np.float32, "C": "<U2"}),
np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"),
("B", "<f4"), ("C", "<U2")])),
# Pass in a dictionary (indices-only).
(dict(index_dtypes={0: "int16"}),
np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "i2"), ("A", "<i8"),
("B", "<f8"), ("C", "O")])),
# Ignore index mappings if index is not True.
(dict(index=False, index_dtypes="<U2"),
np.rec.array([(1, 0.2, "a"), (2, 1.5, "bc")],
dtype=[("A", "<i8"), ("B", "<f8"), ("C", "O")])),
# Non-existent names / indices in mapping should not error.
(dict(index_dtypes={0: "int16", "not-there": "float32"}),
np.rec.array([(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "i2"), ("A", "<i8"),
("B", "<f8"), ("C", "O")])),
# Names / indices not in mapping default to array dtype.
(dict(column_dtypes={"A": np.int8, "B": np.float32}),
np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"),
("B", "<f4"), ("C", "O")])),
# Names / indices not in dtype mapping default to array dtype.
(dict(column_dtypes={"A": np.dtype('int8'), "B": np.dtype('float32')}),
np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"),
("B", "<f4"), ("C", "O")])),
# Mixture of everything.
(dict(column_dtypes={"A": np.int8, "B": np.float32},
index_dtypes="<U2"),
np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<U2"), ("A", "i1"),
("B", "<f4"), ("C", "O")])),
        # Invalid dtype values.
(dict(index=False, column_dtypes=list()),
(ValueError, "Invalid dtype \\[\\] specified for column A")),
(dict(index=False, column_dtypes={"A": "int32", "B": 5}),
(ValueError, "Invalid dtype 5 specified for column B")),
# Numpy can't handle EA types, so check error is raised
(dict(index=False, column_dtypes={"A": "int32",
"B": CategoricalDtype(['a', 'b'])}),
(ValueError, 'Invalid dtype category specified for column B')),
# Check that bad types raise
(dict(index=False, column_dtypes={"A": "int32", "B": "foo"}),
(TypeError, 'data type "foo" not understood')),
])
def test_to_records_dtype(self, kwargs, expected):
# see gh-18146
df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})
if not isinstance(expected, np.recarray):
with pytest.raises(expected[0], match=expected[1]):
df.to_records(**kwargs)
else:
result = df.to_records(**kwargs)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("df,kwargs,expected", [
# MultiIndex in the index.
(DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=list("abc")).set_index(["a", "b"]),
dict(column_dtypes="float64", index_dtypes={0: "int32", 1: "int8"}),
np.rec.array([(1, 2, 3.), (4, 5, 6.), (7, 8, 9.)],
dtype=[("a", "<i4"), ("b", "i1"), ("c", "<f8")])),
# MultiIndex in the columns.
(DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=MultiIndex.from_tuples([("a", "d"), ("b", "e"),
("c", "f")])),
dict(column_dtypes={0: "<U1", 2: "float32"}, index_dtypes="float32"),
np.rec.array([(0., u"1", 2, 3.), (1., u"4", 5, 6.),
(2., u"7", 8, 9.)],
dtype=[("index", "<f4"),
("('a', 'd')", "<U1"),
("('b', 'e')", "<i8"),
("('c', 'f')", "<f4")])),
# MultiIndex in both the columns and index.
(DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=MultiIndex.from_tuples([
("a", "d"), ("b", "e"), ("c", "f")], names=list("ab")),
index=MultiIndex.from_tuples([
("d", -4), ("d", -5), ("f", -6)], names=list("cd"))),
dict(column_dtypes="float64", index_dtypes={0: "<U2", 1: "int8"}),
np.rec.array([("d", -4, 1., 2., 3.), ("d", -5, 4., 5., 6.),
("f", -6, 7, 8, 9.)],
dtype=[("c", "<U2"), ("d", "i1"),
("('a', 'd')", "<f8"), ("('b', 'e')", "<f8"),
("('c', 'f')", "<f8")]))
])
def test_to_records_dtype_mi(self, df, kwargs, expected):
# see gh-18146
result = df.to_records(**kwargs)
tm.assert_almost_equal(result, expected)
def test_to_records_dict_like(self):
# see gh-18146
class DictLike(object):
def __init__(self, **kwargs):
self.d = kwargs.copy()
def __getitem__(self, key):
return self.d.__getitem__(key)
def __contains__(self, key):
return key in self.d
def keys(self):
return self.d.keys()
df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})
dtype_mappings = dict(column_dtypes=DictLike(**{"A": np.int8,
"B": np.float32}),
index_dtypes="<U2")
result = df.to_records(**dtype_mappings)
expected = np.rec.array([("0", "1", "0.2", "a"),
("1", "2", "1.5", "bc")],
dtype=[("index", "<U2"), ("A", "i1"),
("B", "<f4"), ("C", "O")])
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('mapping', [
dict,
collections.defaultdict(list),
collections.OrderedDict])
def test_to_dict(self, mapping):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
# GH16122
recons_data = DataFrame(test_data).to_dict(into=mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l", mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s", mapping)
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp", mapping)
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r", mapping)
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': np.nan, 'B': '3'}]
assert isinstance(recons_data, list)
assert (len(recons_data) == 3)
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k2][k])
df = DataFrame(test_data)
df['duped'] = df[df.columns[0]]
recons_data = df.to_dict("i")
comp_data = test_data.copy()
comp_data['duped'] = comp_data[df.columns[0]]
for k, v in compat.iteritems(comp_data):
for k2, v2 in compat.iteritems(v):
assert (v2 == recons_data[k2][k])
@pytest.mark.parametrize('mapping', [
list,
collections.defaultdict,
[]])
def test_to_dict_errors(self, mapping):
# GH16122
df = DataFrame(np.random.randn(3, 3))
with pytest.raises(TypeError):
df.to_dict(into=mapping)
def test_to_dict_not_unique_warning(self):
# GH16927: When converting to a dict, if a column has a non-unique name
# it will be dropped, throwing a warning.
df = DataFrame([[1, 2, 3]], columns=['a', 'a', 'b'])
with tm.assert_produces_warning(UserWarning):
df.to_dict()
@pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern'])
def test_to_records_datetimeindex_with_tz(self, tz):
# GH13937
dr = date_range('2016-01-01', periods=10,
freq='S', tz=tz)
df = DataFrame({'datetime': dr}, index=dr)
expected = df.to_records()
result = df.tz_convert("UTC").to_records()
# both converted to UTC, so they are equal
tm.assert_numpy_array_equal(result, expected)
# orient - orient argument to to_dict function
# item_getter - function for extracting value from
# the resulting dict using column name and index
@pytest.mark.parametrize('orient,item_getter', [
('dict', lambda d, col, idx: d[col][idx]),
('records', lambda d, col, idx: d[idx][col]),
('list', lambda d, col, idx: d[col][idx]),
('split', lambda d, col, idx: d['data'][idx][d['columns'].index(col)]),
('index', lambda d, col, idx: d[idx][col])
])
def test_to_dict_box_scalars(self, orient, item_getter):
# 14216, 23753
# make sure that we are boxing properly
df = DataFrame({'a': [1, 2], 'b': [.1, .2]})
result = df.to_dict(orient=orient)
assert isinstance(item_getter(result, 'a', 0), (int, long))
assert isinstance(item_getter(result, 'b', 0), float)
def test_frame_to_dict_tz(self):
# GH18372 When converting to dict with orient='records' columns of
# datetime that are tz-aware were not converted to required arrays
data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
(datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)]
df = DataFrame(list(data), columns=["d", ])
result = df.to_dict(orient='records')
expected = [
{'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)},
{'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)},
]
tm.assert_dict_equal(result[0], expected[0])
tm.assert_dict_equal(result[1], expected[1])
@pytest.mark.parametrize('into, expected', [
(dict, {0: {'int_col': 1, 'float_col': 1.0},
1: {'int_col': 2, 'float_col': 2.0},
2: {'int_col': 3, 'float_col': 3.0}}),
(OrderedDict, OrderedDict([(0, {'int_col': 1, 'float_col': 1.0}),
(1, {'int_col': 2, 'float_col': 2.0}),
(2, {'int_col': 3, 'float_col': 3.0})])),
(defaultdict(list), defaultdict(list,
{0: {'int_col': 1, 'float_col': 1.0},
1: {'int_col': 2, 'float_col': 2.0},
2: {'int_col': 3, 'float_col': 3.0}}))
])
def test_to_dict_index_dtypes(self, into, expected):
# GH 18580
# When using to_dict(orient='index') on a dataframe with int
# and float columns only the int columns were cast to float
df = DataFrame({'int_col': [1, 2, 3],
'float_col': [1.0, 2.0, 3.0]})
result = df.to_dict(orient='index', into=into)
cols = ['int_col', 'float_col']
result = DataFrame.from_dict(result, orient='index')[cols]
expected = DataFrame.from_dict(expected, orient='index')[cols]
tm.assert_frame_equal(result, expected)
def test_to_dict_numeric_names(self):
# https://github.com/pandas-dev/pandas/issues/24940
df = DataFrame({str(i): [i] for i in range(5)})
result = set(df.to_dict('records')[0].keys())
expected = set(df.columns)
assert result == expected
def test_to_dict_wide(self):
# https://github.com/pandas-dev/pandas/issues/24939
df = DataFrame({('A_{:d}'.format(i)): [i] for i in range(256)})
result = df.to_dict('records')[0]
expected = {'A_{:d}'.format(i): i for i in range(256)}
assert result == expected
|
bsd-3-clause
|
sarahgrogan/scikit-learn
|
examples/neighbors/plot_digits_kde_sampling.py
|
251
|
2022
|
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
|
bsd-3-clause
|
jyt109/BDA_py_demos
|
demos_ch10/demo10_2.py
|
19
|
1606
|
"""Bayesian data analysis
Chapter 10, demo 2
Importance sampling example
"""
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2, markeredgewidth=0)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
plt.rc('patch', facecolor='#bfe2ff')
# fake interesting distribution
x = np.linspace(-3, 3, 200)
r = np.array([ 1.1 , 1.3 , -0.1 , -0.7 , 0.2 , -0.4 , 0.06, -1.7 ,
1.7 , 0.3 , 0.7 , 1.6 , -2.06, -0.74, 0.2 , 0.5 ])
# Estimate the density (named q, to emphasize that it does not need to be
# normalized). Parameter bw_method=0.48 is used to mimic the outcome of the
# kernelp function in Matlab.
q_func = stats.gaussian_kde(r, bw_method=0.48)
q = q_func.evaluate(x)
# importance sampling example
g = stats.norm.pdf(x)
w = q/g
r = np.random.randn(100)
r = r[np.abs(r) < 3] # remove samples out of the grid
wr = q_func.evaluate(r)/stats.norm.pdf(r)
# plot
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(10,8))
axes[0].plot(x, q, label=r'$q(\theta|y)$')
axes[0].plot(x, g, label=r'$g(\theta)$')
axes[0].set_yticks(())
axes[0].set_title('target and proposal distributions')
axes[0].legend()
axes[1].plot(x, w, label=r'$q(\theta|y)/g(\theta)$')
axes[1].set_title('samples and importance weights')
axes[1].vlines(r, 0, wr, color='#377eb8', alpha=0.4)
axes[1].set_ylim((0,axes[1].get_ylim()[1]))
axes[1].legend()
plt.show()
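# Hedged addition (not in the original demo): the weights wr computed above can
# be turned into a self-normalized importance sampling estimate of an
# expectation under q, e.g. the mean of the target distribution:
# >>> np.sum(wr * r) / np.sum(wr)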
|
gpl-3.0
|
braineo/sudokuSolver
|
code_tester/mnist.py
|
1
|
1939
|
import os
import struct
import numpy as np
"""
Parse images from MNIST binary file.
Idea from https://gist.github.com/akesling/5358964
"""
class MNISTdataset(object):
def __init__(self, pathToDataset='.'):
self._pathToDataset = pathToDataset
return
def read(self, dataset="training"):
if dataset is "training":
fname_img = os.path.join(
self._pathToDataset, 'train-images.idx3-ubyte')
fname_lbl = os.path.join(
self._pathToDataset, 'train-labels.idx1-ubyte')
elif dataset is "testing":
fname_img = os.path.join(
self._pathToDataset, 't10k-images.idx3-ubyte')
fname_lbl = os.path.join(
self._pathToDataset, 't10k-labels.idx1-ubyte')
else:
raise ValueError, "dataset must be 'testing' or 'training'"
# Load everything in some numpy arrays
with open(fname_lbl, 'rb') as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
lbl = np.fromfile(flbl, dtype=np.int8)
with open(fname_img, 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = np.fromfile(fimg, dtype=np.uint8).reshape(
len(lbl), rows, cols)
return img, lbl
def showImage(self, image):
"""
:param image: numpy.uint8 2D array of pixel data
:return: None
"""
from matplotlib import pyplot
import matplotlib
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
imgplot = ax.imshow(image, cmap=matplotlib.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
pyplot.show()
dataset = MNISTdataset('../training_data/')
image, label = dataset.read()
np.save('imageTrain', image)
np.save('labelTrain', label)
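# Hedged usage sketch (assumes the same directory layout as above; not part of
# the original script): the test split could be cached in the same way.
# >>> imageTest, labelTest = dataset.read(dataset='testing')
# >>> np.save('imageTest', imageTest)
# >>> np.save('labelTest', labelTest)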
|
gpl-2.0
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/externals/joblib/__init__.py
|
54
|
5087
|
"""Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
========================= ================================================
**User documentation:** http://pythonhosted.org/joblib
**Download packages:** http://pypi.python.org/pypi/joblib#downloads
**Source code:** http://github.com/joblib/joblib
**Report issues:** http://github.com/joblib/joblib/issues
========================= ================================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
   has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.11'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
|
mit
|
eor/STARDUST
|
scripts/sed_generator/sed_stellar_mass.py
|
1
|
5947
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------
# About
#-----------------------------------------------------------------
# This file contains functions to compute the stellar mass for a given
# dark matter halo of mass M
#-----------------------------------------------------------------
# Libs
#-----------------------------------------------------------------
import math as m
import sed_user_settings as us
#-----------------------------------------------------------------
# Cosmological parameters
#-----------------------------------------------------------------
# TODO: get these from the pipeline later
cosmoOmegaM = us.cosmoOmegaM
cosmoOmegaB = us.cosmoOmegaB
#-----------------------------------------------------------------
# Compute stellar mass - the cheap way
#-----------------------------------------------------------------
def compute_stellar_mass_simple( haloMass, fStar=0.1 ):
# This is how stellar masses were computed in the
# Thomas & Zaroubi version of STARDUST
stellarMass = fStar * (cosmoOmegaB/cosmoOmegaM) * haloMass
return stellarMass
#-----------------------------------------------------------------
# Compute stellar mass
#-----------------------------------------------------------------
def compute_stellar_mass( haloMass, redshift, verbose=False ):
# This function computes the stellar mass for a given
# redshift and mass of a dark matter halo
# References:
# [1]: Behroozi, Wechsler, & Conroy, 2013, ApJ 770:57 (for 0<z<8)
# TODO:
# - find a better solution for higher redshifts
z = redshift
a = 1./(1.+ z)
# equation parameters
M_10 = 11.514
M_1a = -1.793
M_1z = -0.251
epsilon_0 = -1.777
epsilon_a = -0.006
epsilon_a2 = -0.119
epsilon_z = -0.000
alpha_0 = -1.412
alpha_a = 0.731
delta_0 = 3.508
delta_a = 2.608
delta_z = -0.043
gamma_0 = 0.316
gamma_a = 1.319
gamma_z = 0.279
# equations (4) from [1]
nu = m.exp(-4.*a*a)
M_1 = 10**( M_10 + ( M_1a*(a-1.) + M_1z*(z) )*nu )
epsilon = 10**( epsilon_0 + ( epsilon_a*(a-1.) + epsilon_z*(z) )*nu + epsilon_a2*(a-1.) )
alpha = alpha_0 + ( alpha_a*(a-1.) )*nu
delta = delta_0 + ( delta_a*(a-1.) + delta_z*(z) )*nu
gamma = gamma_0 + ( gamma_a*(a-1.) + gamma_z*(z) )*nu
# equations (3) from [1]
x = m.log10( haloMass / M_1 )
try:
divisor = 1.+ m.exp( 10**(-x) )
except OverflowError:
# if x < -2.849, m.exp( 10**(-x) ) becomes too large (>1e307)
divisor = 1e307
frac = ( ( m.log10( 1.+m.exp(x) ) )**gamma )/ divisor
f_x = (-1)*m.log10( 10**(alpha*x) + 1.0 ) + delta*( frac )
x = 0
frac = ( ( m.log10( 1.+m.exp(x) ) )**gamma )/ ( 1.+ m.exp( 10**(-x) ) )
f_0 = (-1)*m.log10( 10**(alpha*x) + 1.0 ) + delta*( frac )
tmpM = m.log10( epsilon*M_1 ) + f_x - f_0
stellarMass = 10**(tmpM)
if(verbose):
print "z = %.4f\t M_halo = %e \t M_stellar = %e \t M_stellar/M_halo= %f"%(z, m.log10(haloMass), m.log10(stellarMass), m.log10(stellarMass/haloMass) )
return stellarMass
#-----------------------------------------------------------------
# Testing
#-----------------------------------------------------------------
if __name__ == "__main__":
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
#-----------------------------------------------------------------
# Test run: Compare difference for fixed z (change M_halo)
#-----------------------------------------------------------------
mStartLog = 8.0
fStar = 0.1
z = 16.0
mHaloLog = np.arange(8, 15, 0.05)
mStarLog1 = np.zeros(len(mHaloLog))
mStarLog2 = np.zeros(len(mHaloLog))
for i in range(0,len(mHaloLog)):
mStarLog1[i] = m.log10( compute_stellar_mass_simple( 10**mHaloLog[i], fStar) )
mStarLog2[i] = m.log10( compute_stellar_mass( 10**mHaloLog[i], z) )
fig = plt.figure()
fig.clear()
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('log halo mass')
ax.set_ylabel('log stellar mass')
#ax.set_ylim(1, 1e6)
#ax.set_yscale('log')
ax.minorticks_on()
ax.plot(mHaloLog, mStarLog1, lw=2.0, color="blue", label='Simple')
ax.plot(mHaloLog, mStarLog2, lw=2.0, color="red", label='Behroozi++')
ax.legend(loc=4)
fig.suptitle('Redshift = %.3f'%z)
fig.savefig('compare_Mstar_z%.3f.png'%z)
plt.close(fig)
#-----------------------------------------------------------------
# Test run: Compare difference for fixed M (z-evolution)
#-----------------------------------------------------------------
zList = np.arange(6,15, 0.05)
mHaloLogFix = 13
mStarLog1 = np.zeros(len(zList))
mStarLog2 = np.zeros(len(zList))
for i in range(0,len(zList)):
#mHaloLog[i] = mStartLog + i*dM
mStarLog1[i] = m.log10( compute_stellar_mass_simple( 10**mHaloLogFix, fStar) )
mStarLog2[i] = m.log10( compute_stellar_mass( 10**mHaloLogFix, zList[i]) )
fig = plt.figure()
fig.clear()
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Redshift')
ax.set_ylabel('log Stellar mass')
#ax.set_ylim(1, 1e6)
#ax.set_yscale('log')
ax.minorticks_on()
ax.plot(zList, mStarLog1, lw=2.0, color="blue", label='Simple')
ax.plot(zList, mStarLog2, lw=2.0, color="red", label='Behroozi++')
ax.legend(loc=5)
fig.suptitle('log halo mass = %.3f'%mHaloLogFix)
fig.savefig('compare_Mstar_M%.3f.png'%mHaloLogFix)
plt.close(fig)
|
gpl-3.0
|
adamdempsey90/fargo3d
|
ppmovie.py
|
1
|
2000
|
#!/usr/bin/env python
import os
from sys import argv
import multiprocessing
import matplotlib
matplotlib.use('Agg')
class imgfile:
def __init__(self,q,num,fnum,dirname,iso,fnamebase,imgext):
self.fnum = fnum
self.num = num
self.fname = dirname + fnamebase + '%03d'%num + imgext
self.q = q
self.iso = iso
def save_image(img):
print 'Starting %d' % img.num
fld = fargo(img.fnum,img.iso)
fld.plot(img.q,output=True,fname=img.fname)
print 'Done %d' % img.num
#def save_images(q,filelist,iso=True,dirname='./',fnamebase='image',imgext='.png'):
# if dirname[-1] != '/':
# dirname += '/'
# totnum = len(filelist)
# for i,j in enumerate(filelist):
# fld = fargo(j,iso)
# fname = dirname + fnamebase + '%03d'%i + imgext
# print 'Saving image %d/%d to %s...\t %.2f%% done' % (i,totnum,fname,float(i)/float(totnum)*100)
# fld.plot(q,output=True,fname=fname)
# First argument is directory name
# second argument is quantity to plot, see fargo class plot method for values
# third argument is number of files to load
# fourth argument is total number of dumps
# fifth argument is output directory in first argument directory
# sixth argument is extension, defaults to png if not given
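# Example invocation (directory, field name and counts are purely illustrative):
#   python ppmovie.py /path/to/run dens 50 200 frames .png
# i.e. plot the 'dens' field for 50 of the 200 dumps found in /path/to/run,
# saving the images into /path/to/run/frames/ as .png files.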
parallel = False
numargs = len(argv)
print 'Importing fargo python module'
execfile('utils/quickreader.py')
dirname = argv[1]
q = argv[2]
numfiles = int(argv[3])
totfiles = int(argv[4])
imgdir = argv[5]
if numargs == 7:
ext = argv[6]
else:
ext = '.png'
filelist = range(totfiles)[::totfiles/numfiles]
print 'Changing to %s directory' % dirname
os.chdir(dirname)
try:
print 'Making %s directory' % imgdir
os.mkdir(imgdir)
except OSError:
pass
if imgdir[-1] != '/':
imgdir += '/'
imgs = [imgfile(q,i,j,imgdir,True,q,ext) for i,j in enumerate(filelist)]
if parallel:
np = 20
print 'Using %d processes' % np
pool = multiprocessing.Pool(np)
pool.map(save_image,imgs)
else:
for x in imgs:
save_image(x)
#save_images(q,filelist,dirname=imgdir,fnamebase=q,imgext=ext);
print 'Finished.'
|
gpl-3.0
|
pprett/scikit-learn
|
sklearn/neighbors/tests/test_lof.py
|
34
|
4142
|
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from math import sqrt
import numpy as np
from sklearn import neighbors
from numpy.testing import assert_array_equal
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.datasets import load_iris
# load the iris dataset
# and randomly permute it
rng = check_random_state(0)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_lof():
# Toy sample (the last two samples are outliers):
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [5, 3], [-4, 2]]
# Test LocalOutlierFactor:
clf = neighbors.LocalOutlierFactor(n_neighbors=5)
score = clf.fit(X).negative_outlier_factor_
assert_array_equal(clf._fit_X, X)
# Assert largest outlier score is smaller than smallest inlier score:
assert_greater(np.min(score[:-2]), np.max(score[-2:]))
# Assert predict() works:
clf = neighbors.LocalOutlierFactor(contamination=0.25,
n_neighbors=5).fit(X)
assert_array_equal(clf._predict(), 6 * [1] + 2 * [-1])
def test_lof_performance():
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
    X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = neighbors.LocalOutlierFactor().fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf._decision_function(X_test)
# check that roc_auc is good
assert_greater(roc_auc_score(y_test, y_pred), .99)
def test_lof_values():
# toy samples:
X_train = [[1, 1], [1, 2], [2, 1]]
clf = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X_train)
s_0 = 2. * sqrt(2.) / (1. + sqrt(2.))
s_1 = (1. + sqrt(2)) * (1. / (4. * sqrt(2.)) + 1. / (2. + 2. * sqrt(2)))
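    # Hand-computed sketch of the expected LOF values with n_neighbors=2:
    # [1, 1] has both neighbours at distance 1, while [1, 2] and [2, 1] each
    # have neighbours at distances 1 and sqrt(2); feeding the resulting local
    # reachability densities into the LOF ratio gives s_0 for the first
    # sample and s_1 for the other two.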
# check predict()
assert_array_almost_equal(-clf.negative_outlier_factor_, [s_0, s_1, s_1])
# check predict(one sample not in train)
assert_array_almost_equal(-clf._decision_function([[2., 2.]]), [s_0])
# # check predict(one sample already in train)
assert_array_almost_equal(-clf._decision_function([[1., 1.]]), [s_1])
def test_lof_precomputed(random_state=42):
"""Tests LOF with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
# As a feature matrix (n_samples by n_features)
lof_X = neighbors.LocalOutlierFactor(n_neighbors=3)
lof_X.fit(X)
pred_X_X = lof_X._predict()
pred_X_Y = lof_X._predict(Y)
# As a dense distance matrix (n_samples by n_samples)
lof_D = neighbors.LocalOutlierFactor(n_neighbors=3, algorithm='brute',
metric='precomputed')
lof_D.fit(DXX)
pred_D_X = lof_D._predict()
pred_D_Y = lof_D._predict(DYX)
assert_array_almost_equal(pred_X_X, pred_D_X)
assert_array_almost_equal(pred_X_Y, pred_D_Y)
def test_n_neighbors_attribute():
X = iris.data
clf = neighbors.LocalOutlierFactor(n_neighbors=500).fit(X)
assert_equal(clf.n_neighbors_, X.shape[0] - 1)
clf = neighbors.LocalOutlierFactor(n_neighbors=500)
assert_warns_message(UserWarning,
"n_neighbors will be set to (n_samples - 1)",
clf.fit, X)
assert_equal(clf.n_neighbors_, X.shape[0] - 1)
|
bsd-3-clause
|
toobaz/pandas
|
pandas/tests/indexing/multiindex/test_setitem.py
|
1
|
14883
|
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, isna, notna
import pandas.core.common as com
from pandas.util import testing as tm
class TestMultiIndexSetItem:
def test_setitem_multiindex(self):
for index_fn in ("loc",):
def assert_equal(a, b):
assert a == b
def check(target, indexers, value, compare_fn, expected=None):
fn = getattr(target, index_fn)
fn.__setitem__(indexers, value)
result = fn.__getitem__(indexers)
if expected is None:
expected = value
compare_fn(result, expected)
# GH7190
index = MultiIndex.from_product(
[np.arange(0, 100), np.arange(0, 80)], names=["time", "firm"]
)
t, n = 0, 2
df = DataFrame(
np.nan,
columns=["A", "w", "l", "a", "x", "X", "d", "profit"],
index=index,
)
check(target=df, indexers=((t, n), "X"), value=0, compare_fn=assert_equal)
df = DataFrame(
-999, columns=["A", "w", "l", "a", "x", "X", "d", "profit"], index=index
)
check(target=df, indexers=((t, n), "X"), value=1, compare_fn=assert_equal)
df = DataFrame(
columns=["A", "w", "l", "a", "x", "X", "d", "profit"], index=index
)
check(target=df, indexers=((t, n), "X"), value=2, compare_fn=assert_equal)
# gh-7218: assigning with 0-dim arrays
df = DataFrame(
-999, columns=["A", "w", "l", "a", "x", "X", "d", "profit"], index=index
)
check(
target=df,
indexers=((t, n), "X"),
value=np.array(3),
compare_fn=assert_equal,
expected=3,
)
# GH5206
df = DataFrame(
np.arange(25).reshape(5, 5), columns="A,B,C,D,E".split(","), dtype=float
)
df["F"] = 99
row_selection = df["A"] % 2 == 0
col_selection = ["B", "C"]
df.loc[row_selection, col_selection] = df["F"]
output = DataFrame(99.0, index=[0, 2, 4], columns=["B", "C"])
tm.assert_frame_equal(df.loc[row_selection, col_selection], output)
check(
target=df,
indexers=(row_selection, col_selection),
value=df["F"],
compare_fn=tm.assert_frame_equal,
expected=output,
)
# GH11372
idx = MultiIndex.from_product(
[["A", "B", "C"], date_range("2015-01-01", "2015-04-01", freq="MS")]
)
cols = MultiIndex.from_product(
[["foo", "bar"], date_range("2016-01-01", "2016-02-01", freq="MS")]
)
df = DataFrame(np.random.random((12, 4)), index=idx, columns=cols)
subidx = MultiIndex.from_tuples(
[("A", Timestamp("2015-01-01")), ("A", Timestamp("2015-02-01"))]
)
subcols = MultiIndex.from_tuples(
[("foo", Timestamp("2016-01-01")), ("foo", Timestamp("2016-02-01"))]
)
vals = DataFrame(np.random.random((2, 2)), index=subidx, columns=subcols)
check(
target=df,
indexers=(subidx, subcols),
value=vals,
compare_fn=tm.assert_frame_equal,
)
# set all columns
vals = DataFrame(np.random.random((2, 4)), index=subidx, columns=cols)
check(
target=df,
indexers=(subidx, slice(None, None, None)),
value=vals,
compare_fn=tm.assert_frame_equal,
)
# identity
copy = df.copy()
check(
target=df,
indexers=(df.index, df.columns),
value=df,
compare_fn=tm.assert_frame_equal,
expected=copy,
)
def test_multiindex_setitem(self):
# GH 3738
# setting with a multi-index right hand side
arrays = [
np.array(["bar", "bar", "baz", "qux", "qux", "bar"]),
np.array(["one", "two", "one", "one", "two", "one"]),
np.arange(0, 6, 1),
]
df_orig = DataFrame(
np.random.randn(6, 3), index=arrays, columns=["A", "B", "C"]
).sort_index()
expected = df_orig.loc[["bar"]] * 2
df = df_orig.copy()
df.loc[["bar"]] *= 2
tm.assert_frame_equal(df.loc[["bar"]], expected)
# raise because these have differing levels
with pytest.raises(TypeError):
df.loc["bar"] *= 2
# from SO
# http://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
df_orig = DataFrame.from_dict(
{
"price": {
("DE", "Coal", "Stock"): 2,
("DE", "Gas", "Stock"): 4,
("DE", "Elec", "Demand"): 1,
("FR", "Gas", "Stock"): 5,
("FR", "Solar", "SupIm"): 0,
("FR", "Wind", "SupIm"): 0,
}
}
)
df_orig.index = MultiIndex.from_tuples(
df_orig.index, names=["Sit", "Com", "Type"]
)
expected = df_orig.copy()
expected.iloc[[0, 2, 3]] *= 2
idx = pd.IndexSlice
df = df_orig.copy()
df.loc[idx[:, :, "Stock"], :] *= 2
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, :, "Stock"], "price"] *= 2
tm.assert_frame_equal(df, expected)
def test_multiindex_assignment(self):
# GH3777 part 2
# mixed dtype
df = DataFrame(
np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list("abc"),
index=[[4, 4, 8], [8, 10, 12]],
)
df["d"] = np.nan
arr = np.array([0.0, 1.0])
df.loc[4, "d"] = arr
tm.assert_series_equal(df.loc[4, "d"], Series(arr, index=[8, 10], name="d"))
# single dtype
df = DataFrame(
np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list("abc"),
index=[[4, 4, 8], [8, 10, 12]],
)
df.loc[4, "c"] = arr
exp = Series(arr, index=[8, 10], name="c", dtype="float64")
tm.assert_series_equal(df.loc[4, "c"], exp)
# scalar ok
df.loc[4, "c"] = 10
exp = Series(10, index=[8, 10], name="c", dtype="float64")
tm.assert_series_equal(df.loc[4, "c"], exp)
# invalid assignments
with pytest.raises(ValueError):
df.loc[4, "c"] = [0, 1, 2, 3]
with pytest.raises(ValueError):
df.loc[4, "c"] = [0]
# groupby example
NUM_ROWS = 100
NUM_COLS = 10
col_names = ["A" + num for num in map(str, np.arange(NUM_COLS).tolist())]
index_cols = col_names[:5]
df = DataFrame(
np.random.randint(5, size=(NUM_ROWS, NUM_COLS)),
dtype=np.int64,
columns=col_names,
)
df = df.set_index(index_cols).sort_index()
grp = df.groupby(level=index_cols[:4])
df["new_col"] = np.nan
f_index = np.arange(5)
def f(name, df2):
return Series(np.arange(df2.shape[0]), name=df2.index.values[0]).reindex(
f_index
)
# TODO(wesm): unused?
# new_df = pd.concat([f(name, df2) for name, df2 in grp], axis=1).T
# we are actually operating on a copy here
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
df.loc[name, "new_col"] = new_vals
def test_series_setitem(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
s = ymd["A"]
s[2000, 3] = np.nan
assert isna(s.values[42:65]).all()
assert notna(s.values[:42]).all()
assert notna(s.values[65:]).all()
s[2000, 3, 10] = np.nan
assert isna(s[49])
def test_frame_getitem_setitem_boolean(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
tm.assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
with pytest.raises(TypeError, match="boolean values only"):
df[df * 0] = 2
def test_frame_getitem_setitem_multislice(self):
levels = [["t1", "t2"], ["a", "b", "c"]]
codes = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(codes=codes, levels=levels, names=[None, "id"])
df = DataFrame({"value": [1, 2, 3, 7, 8]}, index=midx)
result = df.loc[:, "value"]
tm.assert_series_equal(df["value"], result)
result = df.loc[df.index[1:3], "value"]
tm.assert_series_equal(df["value"][1:3], result)
result = df.loc[:, :]
tm.assert_frame_equal(df, result)
result = df
df.loc[:, "value"] = 10
result["value"] = 10
tm.assert_frame_equal(df, result)
df.loc[:, :] = 10
tm.assert_frame_equal(df, result)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[["a", "a", "b", "b"], [0, 1, 0, 1]])
cp = df.copy()
cp["a"] = cp["b"]
tm.assert_frame_equal(cp["a"], cp["b"])
# set with ndarray
cp = df.copy()
cp["a"] = cp["b"].values
tm.assert_frame_equal(cp["a"], cp["b"])
# ---------------------------------------
# #1803
columns = MultiIndex.from_tuples([("A", "1"), ("A", "2"), ("B", "1")])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df["A"] = 0.0 # Doesn't work
assert (df["A"].values == 0).all()
# it broadcasts
df["B", "1"] = [1, 2, 3]
df["A"] = df["B", "1"]
sliced_a1 = df["A", "1"]
sliced_a2 = df["A", "2"]
sliced_b1 = df["B", "1"]
tm.assert_series_equal(sliced_a1, sliced_b1, check_names=False)
tm.assert_series_equal(sliced_a2, sliced_b1, check_names=False)
assert sliced_a1.name == ("A", "1")
assert sliced_a2.name == ("A", "2")
assert sliced_b1.name == ("B", "1")
def test_getitem_setitem_tuple_plus_columns(
self, multiindex_year_month_day_dataframe_random_data
):
# GH #1013
ymd = multiindex_year_month_day_dataframe_random_data
df = ymd[:5]
result = df.loc[(2000, 1, 6), ["A", "B", "C"]]
expected = df.loc[2000, 1, 6][["A", "B", "C"]]
tm.assert_series_equal(result, expected)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(
levels=[[0, 1, 2], [0, 2]], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]]
)
frame = DataFrame(
np.random.randn(len(index), 4), index=index, columns=["a", "b", "c", "d"]
)
res = frame.loc[1:2]
exp = frame.reindex(frame.index[2:])
tm.assert_frame_equal(res, exp)
frame.loc[1:2] = 7
assert (frame.loc[1:2] == 7).values.all()
series = Series(np.random.randn(len(index)), index=index)
res = series.loc[1:2]
exp = series.reindex(series.index[2:])
tm.assert_series_equal(res, exp)
series.loc[1:2] = 7
assert (series.loc[1:2] == 7).values.all()
def test_setitem_change_dtype(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
dft = frame.T
s = dft["foo", "two"]
dft["foo", "two"] = s > s.median()
tm.assert_series_equal(dft["foo", "two"], s > s.median())
# assert isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[("foo", "two")])
tm.assert_series_equal(reindexed["foo", "two"], s > s.median())
def test_set_column_scalar_with_loc(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
subset = frame.index[[1, 4, 5]]
frame.loc[subset] = 99
assert (frame.loc[subset].values == 99).all()
col = frame["B"]
col[subset] = 97
assert (frame.loc[subset, "B"] == 97).all()
def test_nonunique_assignment_1750(self):
df = DataFrame(
[[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]], columns=list("ABCD")
)
df = df.set_index(["A", "B"])
ix = MultiIndex.from_tuples([(1, 1)])
df.loc[ix, "C"] = "_"
assert (df.xs((1, 1))["C"] == "_").all()
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = MultiIndex.from_tuples([("A", "1"), ("B", "1"), ("A", "2")])
df = DataFrame(np.arange(3).reshape((1, 3)), columns=cols, dtype=object)
index = df.index.copy()
df["A"] = df["A"].astype(np.float64)
tm.assert_index_equal(df.index, index)
def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
# this works because we are modifying the underlying array
# really a no-no
df = multiindex_dataframe_random_data.T
df["foo"].values[:] = 0
assert (df["foo"].values == 0).all()
def test_frame_setitem_copy_raises(multiindex_dataframe_random_data):
# will raise/warn as its chained assignment
df = multiindex_dataframe_random_data.T
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
df["foo"]["one"] = 2
def test_frame_setitem_copy_no_write(multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data.T
expected = frame
df = frame.copy()
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
df["foo"]["one"] = 2
result = df
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
sherpaman/MolToolPy
|
bin/calc_mutual_info_bootstrap.py
|
1
|
2524
|
#!/usr/bin/env python
import ts
import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
parser = ArgumentParser( description = 'Calculate Mutual Information')
#
# INPUT FILES
#
parser.add_argument("-i","--inp",dest="inp",action="store",type=str,default=None,help="input Data",required=True,metavar="DAT FILE")
#
# OUTPUT FILES
#
parser.add_argument("-o","--out",dest="out",action="store",type=str,default=None,required=True,help="Output File Name",metavar="DAT FILE")
#
# VAR ARGUMENTS
#
parser.add_argument("-r","--resample",dest="resample",action="store",type=int,default=None,help="Bootstrap Resampling", metavar="INTEGER")
parser.add_argument("-s","--stride",dest="stride",action="store",type=int,default=1,help="time", metavar="INTEGER")
parser.add_argument("-d","--ndim",dest="ndim",action="store",type=int,default=1,help="nuber of dimensions", metavar="INTEGER")
parser.add_argument("-n","--nbins",dest="nbins",action="store",type=int ,default=10,help="number of bins", metavar="INTEGER")
parser.add_argument("-b","--opt",dest="opt",action="store_true",default=False,help="toggle bins optimization")
parser.add_argument("-p","--plot",dest="plot",action="store_true",default=False,help="toggle auto-saving matrix plot")
parser.add_argument("-x","--interleave",dest="interleave",action="store_true",default=False,help="toggle interleaving of data")
#
#
options = parser.parse_args()
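# Example invocation (file names are purely illustrative):
#   python calc_mutual_info_bootstrap.py -i traj.dat -o mi.dat -r 100 -d 3 -n 20 -b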
def interleave(data,ndim):
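    # Reorder the columns from a block layout (all first-dimension columns,
    # then all second-dimension columns, ...) to an interleaved layout in
    # which the ndim components of each replica sit next to each other
    # (presumably the ordering ts.TimeSer expects when dim > 1).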
nfr, nrep = data.shape
out = np.zeros(data.shape)
for i in range(nrep/ndim):
for j in range(ndim):
out[:,ndim*i+j] = data[:,j*(nrep/ndim)+i]
return out
f_dat = options.inp
f_out = options.out
stride = options.stride
f_out_mi = f_out.split('.')[0]+'_MI.svg'
f_out_dev = f_out.split('.')[0]+'_DEV.svg'
dat = np.loadtxt(f_dat)
dat = dat[::stride]
if (options.interleave) & (options.ndim != 1):
dat = interleave(dat,options.ndim)
DATA= ts.TimeSer(dat,len(dat),dim=options.ndim,nbins=options.nbins)
DATA.calc_bins(opt=options.opt)
M, E = DATA.mutual_info_omp()
M_B = DATA.mutual_info_bootstrap(resample=options.resample)
MI_aver=np.average(M_B,axis=2)
MI_var=np.var(M_B,axis=2)
DEV=np.abs(MI_aver - M)/np.sqrt(MI_var)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
mat1 = ax1.matshow(M)
fig1.colorbar(mat1)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
mat2 = ax2.matshow(DEV)
fig2.colorbar(mat2)
plt.show()
if options.plot:
fig1.savefig(f_out_mi,format='svg')
fig2.savefig(f_out_dev,format='svg')
quit()
|
gpl-2.0
|
rs2/pandas
|
pandas/tests/arrays/categorical/test_analytics.py
|
2
|
14388
|
import re
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
class TestCategoricalAnalytics:
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_not_ordered_raises(self, aggregation):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = f"Categorical is not ordered for operation {aggregation}"
agg_func = getattr(cat, aggregation)
with pytest.raises(TypeError, match=msg):
agg_func()
def test_min_max_ordered(self):
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
@pytest.mark.parametrize(
"categories,expected",
[
(list("ABC"), np.NaN),
([1, 2, 3], np.NaN),
pytest.param(
Series(date_range("2020-01-01", periods=3), dtype="category"),
NaT,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/29962"
),
),
],
)
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_ordered_empty(self, categories, expected, aggregation):
# GH 30227
cat = Categorical([], categories=categories, ordered=True)
agg_func = getattr(cat, aggregation)
result = agg_func()
assert result is expected
@pytest.mark.parametrize(
"values, categories",
[(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_with_nan(self, values, categories, function, skipna):
# GH 25303
cat = Categorical(values, categories=categories, ordered=True)
result = getattr(cat, function)(skipna=skipna)
if skipna is False:
assert result is np.nan
else:
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_only_nan(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Categorical([np.nan], categories=[1, 2], ordered=True)
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("method", ["min", "max"])
def test_deprecate_numeric_only_min_max(self, method):
# GH 25303
cat = Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
with tm.assert_produces_warning(expected_warning=FutureWarning):
getattr(cat, method)(numeric_only=True)
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_raises(self, method):
cat = Categorical(["a", "b", "c", "b"], ordered=False)
msg = (
f"Categorical is not ordered for operation {method}\n"
"you can use .as_ordered() to change the Categorical to an ordered one"
)
method = getattr(np, method)
with pytest.raises(TypeError, match=re.escape(msg)):
method(cat)
@pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
msg = (
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
method(cat, **kwargs)
@pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
def test_numpy_min_max_axis_equals_none(self, method, expected):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
method = getattr(np, method)
result = method(cat, axis=None)
assert result == expected
@pytest.mark.parametrize(
"values,categories,exp_mode",
[
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
],
)
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
ordered=ordered,
)
ser = Series(cat)
# Searching for single item argument, side='left' (default)
res_cat = cat.searchsorted("apple")
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = ser.searchsorted("apple")
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = cat.searchsorted(["bread"])
res_ser = ser.searchsorted(["bread"])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = cat.searchsorted(["apple", "bread"], side="right")
res_ser = ser.searchsorted(["apple", "bread"], side="right")
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted("cucumber")
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted("cucumber")
# Searching for multiple values one of each is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted(["bread", "cucumber"])
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted(["bread", "cucumber"])
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=["c", "a", "b"])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(["b", "a", "b"], categories=["a", "b"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["c", "b", "a", "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["c", "b", "a"], categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(["b", "a", "a"], categories=["a", "b", "c"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["b", "b", np.nan, "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["b", np.nan, "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
# Categorical.unique keeps categories order if ordered=True
exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
def test_shift(self):
# GH 9416
cat = Categorical(["a", "b", "c", "d", "a"])
# shift forward
sp1 = cat.shift(1)
xp1 = Categorical([np.nan, "a", "b", "c", "d"])
tm.assert_categorical_equal(sp1, xp1)
tm.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = Categorical(
["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"]
)
tm.assert_categorical_equal(sn2, xp2)
tm.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
tm.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = Categorical([1, 2, 3])
exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
assert cat.nbytes == exp
def test_memory_usage(self):
cat = Categorical([1, 2, 3])
# .categories is an index, so we include the hashtable
assert 0 < cat.nbytes <= cat.memory_usage()
assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
cat = Categorical(["foo", "foo", "bar"])
assert cat.memory_usage(deep=True) > cat.nbytes
if not PYPY:
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
assert abs(diff) < 100
def test_map(self):
c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True)
result = c.map(lambda x: x.lower())
exp = Categorical(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_categorical_equal(result, exp)
c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False)
result = c.map(lambda x: x.lower())
exp = Categorical(list("ababc"), categories=list("abc"), ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1)
# GH 12766: Return an index not an array
tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_inplace_raises(self, value):
cat = Categorical(["A", "B", "B", "C", "A"])
msg = (
'For argument "inplace" expected type bool, '
f"received type {type(value).__name__}"
)
with pytest.raises(ValueError, match=msg):
cat.set_ordered(value=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.as_ordered(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.as_unordered(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.set_categories(["X", "Y", "Z"], rename=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(["X", "Y", "Z"], inplace=value)
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(["X", "Y", "Z"], ordered=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.add_categories(new_categories=["D", "E", "F"], inplace=value)
with pytest.raises(ValueError, match=msg):
cat.remove_categories(removals=["D", "E", "F"], inplace=value)
with pytest.raises(ValueError, match=msg):
cat.remove_unused_categories(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.sort_values(inplace=value)
def test_isna(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isna()
tm.assert_numpy_array_equal(res, exp)
|
bsd-3-clause
|
zehpunktbarron/iOSMAnalyzer
|
scripts/c6_equidistance.py
|
1
|
4960
|
# -*- coding: utf-8 -*-
#!/usr/bin/python2.7
#description :This file creates a plot: Calculates the development of the equidistance between version 1 and the currently valid version of a polygon with a "natural" or "landuse"-tag
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :18.07.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
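# In this script "equidistance" means the mean vertex spacing of a polygon
# outline, i.e. perimeter / number of stored points (ST_Perimeter / ST_NPoints
# in the query below); e.g. an outline with a 40 m perimeter stored with
# 5 points has an equidistance of 8 m.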
import psycopg2
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pylab
# import db connection parameters
import db_conn_para as db
###
### Connect to database with psycopg2. Add arguments from parser to the connection-string
###
try:
conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
print "Connecting to database\n->%s" % (conn_string)
# Establish a connection with the DB via psycopg2
conn = psycopg2.connect(conn_string)
print "Connection to database was established succesfully"
except:
print "Connection to database failed"
###
### Execute SQL query
###
# With this new "cursor" method, SQL queries can be executed
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur.execute("""
    -- If a polygon was merged or split, new polygons arise whose size and number of vertices can differ considerably from the original
    -- Therefore: if the two polygons differ in size by more than +-50 %, a split or merge is most likely (such cases are filtered out below). This threshold was determined iteratively
    -- Join the currently valid osm-id of a natural or landuse polygon with its first version and calculate the difference
SELECT
round(T2.equidistance_first::numeric, 3)::float AS equidistance_first,
round(T1.equidistance_new::numeric, 3)::float AS equidistance_new
FROM
-- equidistance, amount of vertices and perimeter of the currently valid natural and landuse-tags
(SELECT id, version, minor,
((ST_Perimeter(geom))/(ST_NPoints(geom))) as equidistance_new,
(ST_NPoints(geom)) AS vertices_new,
(ST_Perimeter(geom)) AS Umfang,
(ST_Area(ST_GeographyFromText(ST_AsText(ST_Transform(geom,4326))))) AS flaeche_new -- area in m²
FROM
hist_polygon
WHERE visible = 'true' AND
(tags ? 'natural' OR tags ? 'landuse') AND
(version = (SELECT max(version) FROM hist_polygon AS h WHERE h.id = hist_polygon.id AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null)))
AND minor = (SELECT max(minor) FROM hist_polygon AS h WHERE h.id = hist_polygon.id AND h.version = hist_polygon.version AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null)))))
T1
JOIN
-- equidistance and amount of vertices of every natural and landuse-tags ever created
(SELECT id, version, minor,
((ST_Perimeter(geom))/(ST_NPoints(geom))) AS equidistance_first,
(ST_NPoints(geom)) AS vertices_first,
(ST_Area(ST_GeographyFromText(ST_AsText(ST_Transform(geom,4326))))) AS flaeche_first -- area in m²
FROM
hist_polygon
WHERE
(tags ? 'natural' OR tags ? 'landuse') AND version=1 AND minor=0
ORDER BY id asc)
T2
ON T1.id = T2.id
WHERE
-- Filter: only choose polygons where the newest polygon is within the range of +- 50% of the created polygon
(50 < (T1.flaeche_new / T2.flaeche_first *100.00)) AND ((T1.flaeche_new / T2.flaeche_first *100.00) < 150)
ORDER BY equidistance_first ASC;
""")
# Getting a list of tuples from the database-cursor (cur)
data_tuples = []
for row in cur:
data_tuples.append(row)
except:
print "Query could not be executed"
###
### Plot (Line-Chart)
###
# Datatypes of the returning data
datatypes = [('col1', 'double'),('col2', 'double')]
# Data-tuple and datatype
data = np.array(data_tuples, dtype=datatypes)
# Date comes from 'col1'
col1 = data['col1']
col2 = data['col2']
# Development of the equidistance: subtract the equidistance of the currently valid polygon from that of its initially created version (version 1)
devel_equ = col1 - col2
fig, ax = plt.subplots()
# Create linechart
plt.plot(devel_equ, color = '#ff6700', linewidth=2, label='Development of the Equidistance')
# Place a gray dashed grid behind the thicks (only for y-axis)
ax.xaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', linestyle='dashed')
# Set this grid behind the thicks
ax.set_axisbelow(True)
# Rotate x-labels on the x-axis
fig.autofmt_xdate()
# Label x and y axis
plt.xlabel('Polygons with a "natural" or "landuse"-Tag')
plt.ylabel('Change of Equidistance [m]')
# place legend
ax.legend(loc='upper center', prop={'size':12})
# Plot-title
plt.title('Equidistance Development of Polygons with "natural" or "landuse"-Tag')
# Save plot to *.jpeg-file
plt.savefig('pics/c6_equidistance.jpeg')
plt.clf()
|
gpl-3.0
|
cython-testbed/pandas
|
pandas/tests/groupby/test_nth.py
|
6
|
14388
|
import numpy as np
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isna, Timestamp
from pandas.compat import lrange
from pandas.util.testing import (
assert_frame_equal,
assert_produces_warning,
assert_series_equal)
import pytest
def test_first_last_nth(df):
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.loc[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = df.loc[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = df.loc[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
df.loc[df['A'] == 'foo', 'B'] = np.nan
assert isna(grouped['B'].first()['foo'])
assert isna(grouped['B'].last()['foo'])
assert isna(grouped['B'].nth(0)['foo'])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(df_mixed_floats):
df = df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
assert s.dtype == 'int64'
f = s.groupby(level=0).first()
assert f.dtype == 'int64'
def test_nth():
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.loc[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
assert expected.name == 1
assert expected2.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
# PR 17493, related to issue 11038
# test Series.nth with True for dropna produces FutureWarning
with assert_produces_warning(FutureWarning):
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
key = [df.index.year, df.index.month]
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
'2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
'2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
assert_frame_equal(result, expected)
def test_nth_multi_index(three_group):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('data, expected_first, expected_last', [
({'id': ['A'],
'time': Timestamp('2012-02-01 14:00:00',
tz='US/Central'),
'foo': [1]},
{'id': ['A'],
'time': Timestamp('2012-02-01 14:00:00',
tz='US/Central'),
'foo': [1]},
{'id': ['A'],
'time': Timestamp('2012-02-01 14:00:00',
tz='US/Central'),
'foo': [1]}),
({'id': ['A', 'B', 'A'],
'time': [Timestamp('2012-01-01 13:00:00',
tz='America/New_York'),
Timestamp('2012-02-01 14:00:00',
tz='US/Central'),
Timestamp('2012-03-01 12:00:00',
tz='Europe/London')],
'foo': [1, 2, 3]},
{'id': ['A', 'B'],
'time': [Timestamp('2012-01-01 13:00:00',
tz='America/New_York'),
Timestamp('2012-02-01 14:00:00',
tz='US/Central')],
'foo': [1, 2]},
{'id': ['A', 'B'],
'time': [Timestamp('2012-03-01 12:00:00',
tz='Europe/London'),
Timestamp('2012-02-01 14:00:00',
tz='US/Central')],
'foo': [3, 2]})
])
def test_first_last_tz(data, expected_first, expected_last):
# GH15884
# Test that the timezone is retained when calling first
# or last on groupby with as_index=False
df = DataFrame(data)
result = df.groupby('id', as_index=False).first()
expected = DataFrame(expected_first)
cols = ['id', 'time', 'foo']
assert_frame_equal(result[cols], expected[cols])
result = df.groupby('id', as_index=False)['time'].first()
assert_frame_equal(result, expected[['id', 'time']])
result = df.groupby('id', as_index=False).last()
expected = DataFrame(expected_last)
cols = ['id', 'time', 'foo']
assert_frame_equal(result[cols], expected[cols])
result = df.groupby('id', as_index=False)['time'].last()
assert_frame_equal(result, expected[['id', 'time']])
def test_nth_multi_index_as_expected():
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny']})
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = DataFrame(
{'C': ['dull', 'dull', 'dull', 'dull']},
index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
['one', 'two', 'one', 'two']],
names=['A', 'B']))
assert_frame_equal(result, expected)
def test_groupby_head_tail():
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
g_as = df.groupby('A', as_index=True)
g_not_as = df.groupby('A', as_index=False)
# as_index= False, much easier
assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
empty_not_as = DataFrame(columns=df.columns,
index=pd.Index([], dtype=df.index.dtype))
empty_not_as['A'] = empty_not_as['A'].astype(df.A.dtype)
empty_not_as['B'] = empty_not_as['B'].astype(df.B.dtype)
assert_frame_equal(empty_not_as, g_not_as.head(0))
assert_frame_equal(empty_not_as, g_not_as.tail(0))
assert_frame_equal(empty_not_as, g_not_as.head(-1))
assert_frame_equal(empty_not_as, g_not_as.tail(-1))
assert_frame_equal(df, g_not_as.head(7)) # contains all
assert_frame_equal(df, g_not_as.tail(7))
# as_index=True, (used to be different)
df_as = df
assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
empty_as['A'] = empty_not_as['A'].astype(df.A.dtype)
empty_as['B'] = empty_not_as['B'].astype(df.B.dtype)
assert_frame_equal(empty_as, g_as.head(0))
assert_frame_equal(empty_as, g_as.tail(0))
assert_frame_equal(empty_as, g_as.head(-1))
assert_frame_equal(empty_as, g_as.tail(-1))
assert_frame_equal(df_as, g_as.head(7)) # contains all
assert_frame_equal(df_as, g_as.tail(7))
# test with selection
assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []])
assert_frame_equal(g_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
assert_frame_equal(g_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
assert_frame_equal(g_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []])
assert_frame_equal(g_not_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
assert_frame_equal(g_not_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
assert_frame_equal(g_not_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
def test_group_selection_cache():
# GH 12839 nth, head, and tail should return same result consistently
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
expected = df.iloc[[0, 2]].set_index('A')
g = df.groupby('A')
result1 = g.head(n=2)
result2 = g.nth(0)
assert_frame_equal(result1, df)
assert_frame_equal(result2, expected)
g = df.groupby('A')
result1 = g.tail(n=2)
result2 = g.nth(0)
assert_frame_equal(result1, df)
assert_frame_equal(result2, expected)
g = df.groupby('A')
result1 = g.nth(0)
result2 = g.head(n=2)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, df)
g = df.groupby('A')
result1 = g.nth(0)
result2 = g.tail(n=2)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, df)
def test_nth_empty():
# GH 16064
df = DataFrame(index=[0], columns=['a', 'b', 'c'])
result = df.groupby('a').nth(10)
expected = DataFrame(index=Index([], name='a'), columns=['b', 'c'])
assert_frame_equal(result, expected)
result = df.groupby(['a', 'b']).nth(10)
expected = DataFrame(index=MultiIndex([[], []], [[], []],
names=['a', 'b']),
columns=['c'])
assert_frame_equal(result, expected)
|
bsd-3-clause
|
justincassidy/scikit-learn
|
sklearn/utils/multiclass.py
|
83
|
12343
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
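# A minimal usage sketch of the partial_fit "classes" contract described above,
# using a bare hypothetical stand-in object rather than a real estimator.
def _partial_fit_first_call_example():
    class _DummyClf(object):
        """Hypothetical stand-in for an estimator implementing partial_fit."""
        pass

    clf = _DummyClf()
    # First call: classes must be given; clf.classes_ is set and True is returned.
    assert _check_partial_fit_first_call(clf, classes=[0, 1, 2])
    # Later calls with a consistent classes argument return False (nothing to do).
    assert not _check_partial_fit_first_call(clf, classes=[0, 1, 2])
    return clf.classes_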
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array-like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
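# A minimal sketch of what class_distribution returns for a small dense
# multioutput target; the values in the comments are what the code above
# computes for this hypothetical input.
def _class_distribution_example():
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 3]])
    classes, n_classes, class_prior = class_distribution(y)
    # classes     -> [array([1, 2]), array([0, 3])]
    # n_classes   -> [2, 2]
    # class_prior -> [array([0.667, 0.333]), array([0.667, 0.333])]  (approximately)
    return classes, n_classes, class_prior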
|
bsd-3-clause
|
gotomypc/scikit-learn
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
5245
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
bsd-3-clause
|
nddsg/TreeDecomps
|
xplodnTree/core/exact_phrg.py
|
1
|
13394
|
#!/usr/bin/env python
# make the other metrics work
# generate the txt files, then work on the pdf output
__version__ = "0.1.0"
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import sys
import os
import re
import networkx as nx
import tdec.PHRG as phrg
import tdec.tree_decomposition as td
import tdec.probabilistic_cfg as pcfg
import tdec.net_metrics as metrics
import tdec.load_edgelist_from_dataframe as tdf
import pprint as pp
import argparse, traceback
import tdec.graph_sampler as gs
DBG = False
#~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~100
def get_parser ():
parser = argparse.ArgumentParser(description='Infer a model given a graph (derive a model)')
parser.add_argument('--orig', required=True, nargs=1, help='Filename of edgelist graph')
parser.add_argument('--chunglu', help='Generate chunglu graphs',action='store_true')
parser.add_argument('--kron', help='Generate Kronecker product graphs',action='store_true')
parser.add_argument('--samp', help='Sample sg>dur>gg2targetN', action='store_true')
parser.add_argument('-tw', action='store_true', default=False, required=False, help="print xphrg mcs tw")
parser.add_argument('-prs', action='store_true', default=False, required=False, help="stop at prs")
parser.add_argument('--version', action='version', version=__version__)
return parser
def nslog(arb_str):
print "~^."*20
print "\t", arb_str.split("_")
print
def Hstar_Graphs_Control (G, graph_name, axs=None):
# Derive the prod rules in a naive way, where
prod_rules = phrg.probabilistic_hrg_learning(G)
pp.pprint(prod_rules)
exit()
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
num_nodes = G.number_of_nodes()
print "Starting max size", 'n=', num_nodes
g.set_max_size(num_nodes)
print "Done with max size"
Hstars = []
num_samples = 20
print '*' * 40
for i in range(0, num_samples):
rule_list = g.sample(num_nodes)
hstar = phrg.grow(rule_list, g)[0]
Hstars.append(hstar)
# if 0:
# g = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=['ts'])
# draw_degree_whole_graph(g,axs)
# draw_degree(Hstars, axs=axs, col='r')
# #axs.set_title('Rules derived by ignoring time')
# axs.set_ylabel('Frequency')
# axs.set_xlabel('degree')
if 0:
# metricx = [ 'degree','hops', 'clust', 'assort', 'kcore','eigen','gcd']
metricx = ['gcd']
# g = nx.from_pandas_dataframe(df, 'src', 'trg',edge_attr=['ts'])
# graph_name = os.path.basename(f_path).rstrip('.tel')
if DBG: print ">", graph_name
metrics.network_properties([G], metricx, Hstars, name=graph_name, out_tsv=True)
def pandas_dataframes_from_edgelists (el_files):
if (el_files is None): return
list_of_dataframes = []
for f in el_files:
print '~' * 80
print f
temporal_graph = False
with open(f, 'r') as ifile:
line = ifile.readline()
while (not temporal_graph):
if ("%" in line):
line = ifile.readline()
elif len(line.split()) > 3:
temporal_graph = True
if (temporal_graph):
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1, 3], autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg', 'ts'])
else:
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1], autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg'])
df = df.drop_duplicates()
list_of_dataframes.append(df)
return list_of_dataframes
def grow_exact_size_hrg_graphs_from_prod_rules(prod_rules, gname, n, runs=1):
"""
Args:
prod_rules: production rules (model)
gname: graph name
n: target graph order (number of nodes)
runs: how many graphs to generate
Returns: list of synthetic graphs
"""
nslog("grow_exact_size_hrg_graphs_from_prod_rules")
DBG = True
if n <=0: sys.exit(1)
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
print
print "Added rules HRG (pr", len(prod_rules),", n,", n,")"
num_nodes = n
if DBG: print "Starting max size"
g.set_max_size(num_nodes)
if DBG: print "Done with max size"
hstars_lst = []
print " ",
for i in range(0, runs):
print '>',
rule_list = g.sample(num_nodes)
hstar = phrg.grow(rule_list, g)[0]
hstars_lst.append(hstar)
return hstars_lst
def pwrlaw_plot (xdata, ydata, yerr):
from scipy import linspace, randn, log10, optimize, sqrt
powerlaw = lambda x, amp, index: amp * (x**index)
logx = log10(xdata)
logy = log10(ydata)
logyerr = yerr / ydata
# define our (line) fitting function
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
pinit = [1.0, -1.0]
out = optimize.leastsq(errfunc, pinit,
args=(logx, logy, logyerr), full_output=1)
pfinal = out[0]
covar = out[1]
print pfinal
print covar
index = pfinal[1]
amp = 10.0**pfinal[0]
indexErr = sqrt( covar[0][0] )
ampErr = sqrt( covar[1][1] ) * amp
print index
# ########
# plotting
# ########
# ax.plot(ydata)
# ax.plot(pl_sequence)
fig, axs = plt.subplots(2,1)
axs[0].plot(xdata, powerlaw(xdata, amp, index)) # Fit
axs[0].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
(yh1,yh2) = (axs[0].get_ylim()[1]*.9, axs[0].get_ylim()[1]*.8)
xh = axs[0].get_xlim()[0]*1.1
print axs[0].get_ylim()
print (yh1,yh2)
axs[0].text(xh, yh1, 'Ampli = %5.2f +/- %5.2f' % (amp, ampErr))
axs[0].text(xh, yh2, 'Index = %5.2f +/- %5.2f' % (index, indexErr))
axs[0].set_title('Best Fit Power Law')
axs[0].set_xlabel('X')
axs[0].set_ylabel('Y')
# xlim(1, 11)
#
# subplot(2, 1, 2)
axs[1].loglog(xdata, powerlaw(xdata, amp, index))
axs[1].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
axs[1].set_xlabel('X (log scale)')
axs[1].set_ylabel('Y (log scale)')
import datetime
figfname = datetime.datetime.now().strftime("%d%b%y")+"_pl"
plt.savefig(figfname, bbox_inches='tight')
return figfname
def deg_vcnt_to_disk(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k',inplace=True)
gb.columns=['vcnt']
gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
# ## - group of synth graphs -
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k',inplace=True)
deg_df['mean'].to_csv("Results/deg_xphrg_"+orig_graph.name+".tsv", sep='\t', header=True)
def plot_g_hstars(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k',inplace=True)
gb.columns=['vcnt']
# k_cnt = [(x.tolist(),y.values[0]) for x,y in gb.iterrows()]
xdata = np.array([x.tolist() for x,y in gb.iterrows()])
ydata = np.array([y.values[0] for x,y in gb.iterrows()])
yerr = ydata *0.000001
fig, ax = plt.subplots()
ax.plot(gb.index.values, gb['vcnt'].values,'-o', markersize=8, markerfacecolor='w', markeredgecolor=[0,0,1], alpha=0.5, label="orig")
ofname = pwrlaw_plot(xdata, ydata,yerr)
if os.path.exists(ofname): print '... Plot save to:',ofname
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k',inplace=True)
# ax.plot(y=deg_df.mean(axis=1))
# ax.plot(y=deg_df.median(axis=1))
# ax.plot()
# orig
deg_df.mean(axis=1).plot(ax=ax,label='mean',color='r')
deg_df.median(axis=1).plot(ax=ax,label='median',color='g')
ax.fill_between(deg_df.index, deg_df.mean(axis=1) - deg_df.sem(axis=1),
deg_df.mean(axis=1) + deg_df.sem(axis=1), alpha=0.2, label="se")
# ax.plot(k_cnt)
# deg_df.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
#
# for g in synths:
# df = pd.DataFrame(g.degree().items())
# gb = df.groupby([1]).count()
# # gb.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
# # Curve-fit
#
plt.savefig('tmpfig', bbox_inches='tight')
def treewidth(parent, children,twlst ):
twlst.append(parent)
for x in children:
if isinstance(x, (tuple,list)):
treewidth(x[0],x[1],twlst)
else:
print type(x), len(x)
def print_treewidth(tree):
root, children = tree
print " computing tree width"
twdth=[]
treewidth(root, children,twdth)
print ' Treewidth:', np.max([len(x)-1 for x in twdth])
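# A minimal sketch of the nested (bag, children) tuple shape that treewidth()
# and print_treewidth() expect; the toy decomposition below is hypothetical and
# its largest bag has 3 vertices, so the reported treewidth is 2.
def _treewidth_example():
    toy_tree = (frozenset([1, 2, 3]),
                [(frozenset([2, 3]), []),
                 (frozenset([3, 4]), [])])
    print_treewidth(toy_tree)  # prints ' Treewidth: 2'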
def get_hrg_production_rules(edgelist_data_frame, graph_name, tw=False, n_subg=2,n_nodes=300):
from tdec.growing import derive_prules_from
nslog("get_hrg_production_rules")
df = edgelist_data_frame
if df.shape[1] == 4:
G = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=True) # whole graph
elif df.shape[1] ==3:
G = nx.from_pandas_dataframe(df, 'src', 'trg', ['ts']) # whole graph
else:
G = nx.from_pandas_dataframe(df, 'src', 'trg')
G.name = graph_name
G.remove_edges_from(G.selfloop_edges())
giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
G = nx.subgraph(G, giant_nodes)
num_nodes = G.number_of_nodes()
phrg.graph_checks(G)
if DBG: print
if DBG: print "--------------------"
if not DBG: print "-Tree Decomposition-"
if DBG: print "--------------------"
prod_rules = {}
K = n_subg
n = n_nodes
if num_nodes >= 500:
print 'Large graph: sampling subgraphs'
for Gprime in gs.rwr_sample(G, K, n):
T = td.quickbb(Gprime)
root = list(T)[0]
T = td.make_rooted(T, root)
T = phrg.binarize(T)
root = list(T)[0]
root, children = T
#td.new_visit(T, G, prod_rules, TD)
td.new_visit(T, G, prod_rules)
else:
T = td.quickbb(G)
root = list(T)[0]
T = td.make_rooted(T, root)
T = phrg.binarize(T)
root = list(T)[0]
root, children = T
# td.new_visit(T, G, prod_rules, TD)
td.new_visit(T, G, prod_rules)
if tw:
print_treewidth(T)
exit()
## --
print ("prod_rules:",len(prod_rules), type(prod_rules))
if DBG: print
if DBG: print "--------------------"
if DBG: print "- Production Rules -"
if DBG: print "--------------------"
for k in prod_rules.iterkeys():
if DBG: print k
s = 0
for d in prod_rules[k]:
s += prod_rules[k][d]
for d in prod_rules[k]:
prod_rules[k][d] = float(prod_rules[k][d]) / float(s) # normalization step to create probabilities, not counts.
if DBG: print '\t -> ', d, prod_rules[k][d]
rules = []
id = 0
for k, v in prod_rules.iteritems():
sid = 0
for x in prod_rules[k]:
rhs = re.findall("[^()]+", x)
rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
if DBG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
sid += 1
id += 1
df = pd.DataFrame(rules)
print "++++++++++"
df.to_csv('ProdRules/{}_prs.tsv'.format(G.name), header=False, index=False, sep="\t")
if os.path.exists('ProdRules/{}_prs.tsv'.format(G.name)):
print 'Saved', 'ProdRules/{}_prs.tsv'.format(G.name)
else:
print "Trouble saving"
print "-----------"
print [type(x) for x in rules[0]]
'''
Graph Generation of Synthetic Graphs
Grow graphs using the union of rules from sampled subgraphs to predict the target order of the
original graph
'''
hStars = grow_exact_size_hrg_graphs_from_prod_rules(rules, graph_name, G.number_of_nodes(),10)
print '... hStars graphs:', len(hStars)
if 0:
metricx = ['degree','hops', 'clust', 'assort', 'kcore','eigen','gcd']
metricx = ['gcd']
metrics.network_properties([G], metricx, hStars, name=graph_name, out_tsv=False)
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
# load orig file into DF and get the dataset name into g_name
datframes = tdf.Pandas_DataFrame_From_Edgelist(args['orig'])
df = datframes[0]
g_name = [x for x in os.path.basename(args['orig'][0]).split('.') if len(x)>3][0]
if args['chunglu']:
print 'Generate chunglu graphs given an edgelist'
sys.exit(0)
elif args['kron']:
print 'Generate Kronecker product graphs given an edgelist'
sys.exit(0)
elif args['samp']:
print 'Sample K subgraphs of n nodes'
K = 500
n = 25
get_hrg_production_rules(df,g_name,n_subg=K, n_nodes=n)
else:
try:
get_hrg_production_rules(df,g_name, args['tw'])
except Exception, e:
print 'ERROR, UNEXPECTED SAVE PLOT EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
sys.exit(0)
|
mit
|
codeWangHub/machineLearningAnywhere
|
tensorflow action/1st_tensorflow_output_viewable /outputViewable.py
|
1
|
2697
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
view_size = 50
gate = 0.02
x_data = np.linspace(-1,1,300).astype(np.float32).reshape(300,1)
noise = np.random.normal(loc=0.1, scale = 0.05, size=x_data.shape) # noise
y_data = np.power(x_data,2) - 0.5 + noise # true distribution of y
ac_loss = plt.subplot(2,2,1)
ac_acc = plt.subplot(2,2,2)
ac_view = plt.subplot(2,1,2)
plt.ion() # interactive mode, so plotting does not block
ac_view.scatter(x_data,y_data,color='blue',s=10)
ac_view.axis([-2,2,-1,5])
plt.show()
# update the plots
def update_figure(step,loss,accuracy,x,y) :
try:
ac_loss.lines.pop(0)
except Exception :
pass
ac_loss.plot([i for i in range(0,step,view_size)],
loss,color='red', label='loss')
try:
ac_acc.lines.pop(0)
except Exception :
pass
ac_acc.plot([i for i in range(0,step,view_size)],
accuracy,color='green',label='accuracy')
try:
ac_view.lines.pop(0)
except Exception :
pass
ac_view.plot(x,y,color='red',label='model')
# plt.savefig("fig_{}.png".format(str(step)))
# build the model
def add_layer(input,input_size,output_size,activor=None):
Weight = tf.Variable(tf.random_normal(
shape=[input_size,output_size]))
bias = tf.Variable(tf.zeros(shape=[output_size]))
output = tf.add(tf.matmul(input,Weight),bias)
if activor is not None :
output = activor(output)
return output
x = tf.placeholder(tf.float32,shape=[None,1])
y = tf.placeholder(tf.float32,shape=[None,1])
h1 = add_layer(x,1,20,tf.nn.tanh)
h2 = add_layer(h1,20,20,tf.nn.tanh)
h3 = add_layer(h2,20,20,tf.nn.tanh)
output = add_layer(h3,20,1)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(output - y), reduction_indices=[1]))
# tf.equal(output,y_data) ===> [True False True .....]
# tf.cast([True, False ...]) ===> [1,0,....]
# tf.reduce_mean([1,0,1,...]) ===> 1+0+1+..+1 / N
accuracy = tf.reduce_mean(tf.cast(tf.abs(output - y) <= gate, # count a prediction as correct when its error is below gate
                          tf.float32))
init = tf.initialize_all_variables()
# choose the optimizer
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(loss)
with tf.Session() as sess :
sess.run(init)
losses = [] # loss at each logging step
accuracies = [] # accuracy at each logging step
batch_x = x_data.reshape([300,1])
batch_y = y_data.reshape([300,1])
for i in range(1001):
sess.run(train_step,feed_dict={x:batch_x, y:batch_y})
if i % view_size == 0 :
ls = sess.run(loss,feed_dict={x:batch_x, y:batch_y})
acc = sess.run(accuracy,feed_dict={x:batch_x, y:batch_y})
losses.append(ls)
accuracies.append(acc)
prod = sess.run(output,feed_dict={x:batch_x, y:batch_y})
update_figure(i+view_size,losses,accuracies,x_data,prod)
plt.pause(0.3)
|
apache-2.0
|
ericmjl/influenza-reassortment
|
preprocessing.py
|
2
|
4903
|
"""
This script performs data preprocessing.
"""
import pandas as pd
import sys
from Bio import SeqIO
class Preprocessor(object):
"""docstring for Preprocessor"""
def __init__(self, handle):
super(Preprocessor, self).__init__()
self.handle = handle
self.df = None
self.fasta = None
# Curate a list of strain names to exclude from the analysis.
self.strain_name_exclusions = ['little yellow-shouldered bat']
def run(self):
self.read_dataframe()
self.clean_strain_names()
self.remove_excluded_strains()
self.remove_isolates_with_bad_names()
self.clean_host_species()
self.impute_location()
self.remove_low_quality_accessions()
self.impute_dates()
self.get_complete_genome_isolates()
self.save_full_isolates()
def remove_excluded_strains(self):
for name in self.strain_name_exclusions:
self.df = self.df[self.df['Strain Name'].str.contains(name) == False]
def remove_isolates_with_bad_names(self):
print('Removing isolates with bad names...')
allowed_lengths = [4, 5]
names_to_drop = set()
for row, data in self.df.iterrows():
strain_name = data['Strain Name'].split('/')
if len(strain_name) not in allowed_lengths:
names_to_drop.add(data['Strain Name'])
for name in names_to_drop:
self.df = self.df[self.df['Strain Name'] != name]
print('Isolates with bad names removed.')
def read_dataframe(self):
"""
Reads the CSV file containing the data into memory.
"""
print('Reading DataFrame into memory...')
self.df = pd.read_csv('{0} Sequences.csv'.format(self.handle), parse_dates=['Collection Date'], na_filter=False)
print('DataFrame read into memory.')
def read_fasta(self):
print('Reading FASTA file into memory...')
self.fasta = SeqIO.to_dict(SeqIO.parse('{0} Sequences.fasta'.format(self.handle), 'fasta'))
print('FASTA file read into memory.')
def clean_strain_names(self):
"""
This function removes parentheses from the strain names, leaving only
the strain name without any other info.
"""
print('Cleaning strain names...')
self.df['Strain Name'] = self.df['Strain Name'].str.replace("\\", "/")
self.df['Strain Name'] = self.df['Strain Name'].str.split("(").apply(lambda x: max(x, key=len))
print('Strain names cleaned.')
def clean_host_species(self):
"""
Host species are usually stored as IRD:hostname.
"""
print('Cleaning host species names...')
self.df['Host Species'] = self.df['Host Species'].str.split(':').str[-1]
print('Host species names cleaned.')
def impute_location(self):
print('Imputing location data...')
self.df['State/Province'] = self.df['Strain Name'].str.split("/").apply(lambda x: x[1] if len(x) == 4 else x[2])
print('Location imputed.')
def impute_dates(self):
print('Imputing collection date data...')
self.df['Collection Date'] = pd.to_datetime(self.df['Collection Date'])
print('Collection dates imputed.')
def remove_low_quality_accessions(self):
print('Removing low quality accessions...')
self.df = self.df[self.df['Sequence Accession'].str.contains('\*') == False]
print('Low quality accessions removed.')
def get_complete_genome_isolates(self):
print('Filtering to completed genomes only...')
rows_to_drop = []
for name, df in self.df.groupby('Strain Name'):
if len(df) == 8 and set(df['Segment'].values) == set(range(1,9)):
pass
else:
rows_to_drop.extend(df.index)
self.df = self.df.drop(rows_to_drop)
print('Filtering complete.')
def save_full_isolates(self):
print('Saving data table comprising only isolates with full genomes...')
self.df.to_csv('{0} Full Isolates.csv'.format(self.handle))
print('Full genome isolates saved.')
def write_segment_fasta(self, segnum):
print('Writing segment FASTA files...')
accessions = self.df.groupby('Segment').get_group(segnum)['Sequence Accession'].values
if set(accessions).issubset(set(self.fasta.keys())):
sequences = [record for accession, record in self.fasta.items() if accession in accessions]
with open('{0} Segment {1}.fasta'.format(self.handle, segnum), 'w+') as f:
SeqIO.write(sequences, f, 'fasta')
else:
raise Exception("Not all requested accessions in original download.")
print('Segment FASTA files written.')
if __name__ == '__main__':
handle = sys.argv[1]
p = Preprocessor(handle)
p.run()
|
mit
|
rveciana/BasemapTutorial
|
code_examples/clip/clip.py
|
3
|
1399
|
from mpl_toolkits.basemap import Basemap
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
from osgeo import gdal
import numpy
import shapefile
fig = plt.figure()
ax = fig.add_subplot(111)
sf = shapefile.Reader("ne_10m_admin_0_countries")
for shape_rec in sf.shapeRecords():
if shape_rec.record[3] == 'Andorra':
vertices = []
codes = []
pts = shape_rec.shape.points
prt = list(shape_rec.shape.parts) + [len(pts)]
for i in range(len(prt) - 1):
for j in range(prt[i], prt[i+1]):
vertices.append((pts[j][0], pts[j][1]))
codes += [Path.MOVETO]
codes += [Path.LINETO] * (prt[i+1] - prt[i] -2)
codes += [Path.CLOSEPOLY]
clip = Path(vertices, codes)
clip = PathPatch(clip, transform=ax.transData)
m = Basemap(llcrnrlon=1.4,
llcrnrlat=42.4,
urcrnrlon=1.77,
urcrnrlat=42.7,
resolution = None,
projection = 'cyl')
ds = gdal.Open('srtm_37_04.tif')
data = ds.ReadAsArray()
gt = ds.GetGeoTransform()
x = numpy.linspace(gt[0], gt[0] + gt[1] * data.shape[1], data.shape[1])
y = numpy.linspace(gt[3], gt[3] + gt[5] * data.shape[0], data.shape[0])
xx, yy = numpy.meshgrid(x, y)
cs = m.contourf(xx,yy,data,range(0, 3600, 200))
for contour in cs.collections:
contour.set_clip_path(clip)
plt.show()
|
cc0-1.0
|
henrykironde/scikit-learn
|
examples/cluster/plot_dbscan.py
|
346
|
2479
|
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
RuthAngus/turnip
|
turnip/calc_completeness.py
|
1
|
1492
|
"""
Ruth's version of Burke's test_comp_grid.py as a function.
"""
import numpy as np
import matplotlib.pyplot as plt
import KeplerPORTs_utils as kpu
def calc_comp(kepid, period, radius):
"""
Calculate the completeness at a given radius and period for a KIC star.
This includes the probability of transiting.
parameters:
----------
kepid: (int)
The KIC id.
period: (float)
The target period.
radius: (float)
The target radius.
returns:
--------
The Completeness.
FIXME: Interpolate instead of finding nearest.
"""
# Instantiate pipeline completeness class structure
doit = kpu.kepler_single_comp_data()
doit.id = kepid
doit.period_want = np.array([period])
doit.rp_want = np.array([radius])
doit.rstar = 0.98
doit.logg = 4.44
doit.deteffver = 2
doit.ecc = 0.0
doit.dataspan = 1426.7
doit.dutycycle = 0.879
doit.pulsedurations = [1.5, 2.0, 2.5, 3.0, 3.5, 4.5, 5.0, 6.0, 7.5, 9.0,
10.5, 12.0, 12.5, 15.0]
doit.cdpps = [36.2, 33.2, 31.0, 29.4, 28.0, 26.1, 25.4, 24.2, 23.1, 22.4,
21.9, 21.8, 21.7, 21.5]
doit.mesthresh = np.full_like(doit.pulsedurations,7.1)
# Calculate completeness over the grid of periods and radii.
probdet, probtot = kpu.kepler_single_comp(doit)
return probtot[0][0]
if __name__ == "__main__":
print(calc_comp(10593626, 365.25, 1))
print(calc_comp(10141213, 365.25, 1))
|
mit
|
enigmampc/catalyst
|
catalyst/examples/buy_btc_simple.py
|
1
|
1715
|
"""
This is a very simple example referenced in the beginner's tutorial:
https://enigmampc.github.io/catalyst/beginner-tutorial.html
Run this example, by executing the following from your terminal:
catalyst ingest-exchange -x bitfinex -f daily -i btc_usdt
catalyst run -f buy_btc_simple.py -x bitfinex --start 2016-1-1 \
--end 2017-9-30 -o buy_btc_simple_out.pickle
If you want to run this code using another exchange, make sure that
the asset is available on that exchange. For example, if you were to run
it for exchange Poloniex, you would need to edit the following line:
context.asset = symbol('btc_usdt') # note 'usdt' instead of 'usd'
and specify exchange poloniex as follows:
catalyst ingest-exchange -x poloniex -f daily -i btc_usdt
catalyst run -f buy_btc_simple.py -x poloniex --start 2016-1-1 \
--end 2017-9-30 -o buy_btc_simple_out.pickle
To see which assets are available on each exchange, visit:
https://www.enigma.co/catalyst/status
"""
from catalyst import run_algorithm
from catalyst.api import order, record, symbol
import pandas as pd
def initialize(context):
context.asset = symbol('btc_usdt')
def handle_data(context, data):
order(context.asset, 1)
record(btc=data.current(context.asset, 'price'))
if __name__ == '__main__':
run_algorithm(
capital_base=10000,
data_frequency='daily',
initialize=initialize,
handle_data=handle_data,
exchange_name='poloniex',
algo_namespace='buy_btc_simple',
quote_currency='usdt',
start=pd.to_datetime('2015-03-01', utc=True),
end=pd.to_datetime('2017-10-31', utc=True),
)
|
apache-2.0
|
tudarmstadt-lt/taxi
|
jnt/isas/format_patternsim_isas.py
|
1
|
4181
|
import argparse
import codecs
from pandas import read_csv
from collections import defaultdict
from traceback import format_exc
import operator
import re
from os.path import splitext
DEBUG = False
CHUNK_SIZE=1000000
re_comma = re.compile(r"^([^,]*),.*", re.U|re.I)
def clean_patternsim_term(term):
cterm = str(term)
cterm = re_comma.sub(r"\1", cterm)
return cterm
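# A minimal sketch: clean_patternsim_term keeps only the part of a term before
# the first comma (the input string below is hypothetical).
def _clean_term_example():
    assert clean_patternsim_term("python, programming language") == "python"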
def patternsim2isas(patternsim_fpath, output_fpath):
patternsim_df = read_csv(patternsim_fpath, encoding='utf-8', delimiter=";", error_bad_lines=False, low_memory=True)
print("Loaded %d pairs" % len(patternsim_df))
isas = defaultdict(dict)
for i, row in patternsim_df.iterrows():
try:
if i % 100000 == 0: print(i)
word1 = clean_patternsim_term(row.form)
word2 = clean_patternsim_term(row.related)
word1_isa_word2 = int(row.hypo)
word2_isa_word1 = int(row.hyper)
if (word1 not in isas or word2 not in isas[word1]) and word1_isa_word2 > 0:
isas[word1][word2] = word1_isa_word2
elif word1_isa_word2 > 0:
isas[word1][word2] += word1_isa_word2
if (word2 not in isas or word1 not in isas[word2]) and word2_isa_word1 > 0:
isas[word2][word1] = word2_isa_word1
elif word2_isa_word1 > 0:
isas[word2][word1] += word2_isa_word1
except:
pass
#print "Bad row:", row
print(format_exc())
with codecs.open(output_fpath, "w", "utf-8") as out:
print("hyponym\thypernym\tfreq", file=out)
for hypo in isas:
for hyper, freq in sorted(list(isas[hypo].items()), key=operator.itemgetter(1), reverse=True):
if isas[hypo][hyper] <= 0:
print("Skipping '%s' --(%d)--> '%s'" % (hypo, isas[hypo][hyper], hyper))
continue
print("%s\t%s\t%d" % (hypo, hyper, freq), file=out)
print("Output:", output_fpath)
def patternsim2isas_hh(hh_fpath, output_fpath):
""" Transforms file 'word1<TAB>word2<TAB>word1_isa_word2<TAB>word2_isa_word1' to 'hyponym<TAB>hypernym<TAB>freq'."""
hh_df = read_csv(hh_fpath, encoding='utf-8', delimiter="\t", error_bad_lines=False, low_memory=False)
isas = defaultdict(dict)
for i, row in hh_df.iterrows():
try:
#if i > 100000: break
if i % 100000 == 0: print(i)
word1 = clean_patternsim_term(row.word1)
word2 = clean_patternsim_term(row.word2)
word1_isa_word2 = int(row.word1_isa_word2)
word2_isa_word1 = int(row.word2_isa_word1)
if word1 not in isas or word2 not in isas[word1]:
isas[word1][word2] = word1_isa_word2
else:
isas[word1][word2] += word1_isa_word2
if word2 not in isas or word1 not in isas[word2]:
isas[word2][word1] = word2_isa_word1
else:
isas[word2][word1] += word2_isa_word1
except:
print("Bad row:", row)
print(format_exc())
with codecs.open(output_fpath, "w", "utf-8") as out:
print("hyponym\thypernym\tfreq", file=out)
for hypo in isas:
for hyper, freq in sorted(list(isas[hypo].items()), key=operator.itemgetter(1), reverse=True):
if isas[hypo][hyper] <= 0: continue
print("%s\t%s\t%d" % (hypo, hyper, freq), file=out)
print("Output:", output_fpath)
def main():
parser = argparse.ArgumentParser(description="Transforms PatternSim output pairs.csv to a CSV file "
"'hyponym<TAB>hypernym<TAB>freq' with a header.")
parser.add_argument('inp', help='Path to an input file.')
parser.add_argument('-o', help='Output file. Default -- next to input file.', default="")
args = parser.parse_args()
output_fpath = splitext(args.inp)[0] + "-isas.csv" if args.o == "" else args.o
print("Input: ", args.inp)
print("Output: ", output_fpath)
patternsim2isas(args.inp, output_fpath)
if __name__ == '__main__':
main()
|
apache-2.0
|
OriolAbril/pyMESA
|
grafics_mesa.py
|
1
|
9832
|
# import libraries
import argparse as arp # command line parsing module and help
import re # regular expressions
import sys
import os
scriptpath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(scriptpath)
import pymesa.tools as pym
class matplotlibScale(arp.Action):
def __init__(
self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None,
):
arp.Action.__init__(
self,
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, value, option_string=None):
yscale = "log" if value in ["logy", "loglog", "logxy"] else "linear"
xscale = "log" if value in ["logx", "loglog", "logxy"] else "linear"
# Save the results in the namespace using the destination
# variable given to our constructor.
setattr(namespace, "yscale", yscale)
setattr(namespace, "xscale", xscale)
delattr(namespace, "scale")
p = arp.ArgumentParser(
prog="MESA_grafics",
description="Script to plot data from the .data output files from MESA",
)
p.add_argument("--version", action="version", version="%(prog)s 2.1")
p.add_argument(
"files",
metavar="FILES",
help="Name of the .data type file/s with the extension",
nargs="+",
)
group = p.add_mutually_exclusive_group(required=True)
group.add_argument(
"-c",
"--columns",
help="Columns to be plotted, the first one will be used as x values.\
The separation between columns for the same file should be a comma and between different\
files a space.",
nargs="+",
)
group.add_argument(
"-hd",
"--headers",
help="Show available headers in the files. Default: False",
action="store_true",
default=False,
)
hdpar = p.add_argument_group(title="Commands to personalize the headers output")
hdpar.add_argument(
"-tc",
"--terminalcols",
help="Number of columns to show the headers when using the -hd flag",
type=int,
default=0,
)
hdpar.add_argument(
"-o",
"--order",
help="Choose fist direction to sort the names, either first descending and\
then to the right (default) or viceversa",
type=str,
default="descending",
choices=["d", "descending", "r", "right"],
)
pltpar = p.add_argument_group(title="Commands to personalize plots")
pltpar.add_argument(
"-pqg",
help="Use PyQtGraph as plotting module instead of Matplotlib",
action="store_true",
default=False,
)
pltpar.add_argument(
"-np",
help="Do not show the plot. Default: False",
action="store_true",
default=False,
)
pltpar.add_argument(
"-off",
"--offset",
help="Use matplotlib" "s default offest in axis ticks. Default:False",
action="store_true",
default=False,
)
pltpar.add_argument(
"-eps",
help="Write matplotlib plot as an Encapsulated PostScript. The name can be specified",
type=str,
nargs="?",
default="noeps",
const="sieps",
)
pltpar.add_argument(
"-sc",
"--scale",
help="Choose the scale between: liner, semilogy, semilogx or logxy",
type=str,
default="lin",
choices=["lin", "logy", "logx", "loglog", "logxy"],
action=matplotlibScale,
)
pltpar.add_argument(
"-co",
"--colors",
help="Colors to be used in the plot, they must be valid matplotlib colors",
nargs="+",
)
pltpar.add_argument(
"-lw",
"--linewidth",
help="Set linewidth of matplotlib plots",
default=1,
type=float,
)
pltpar.add_argument("-t", "--title", help="Title of the plot", type=str, default="")
pltpar.add_argument(
"-lt", "--legtit", help="Title for the legend", type=str, default=""
)
pltpar.add_argument(
"-l",
"--legend",
help="Labels for the legend. Default text is name of y data column.",
nargs="+",
)
pltpar.add_argument(
"-lp",
"--legprefix",
help="Prefix for the legend labels. If present, legprefix must\
have the same length as files and legend as the number of columns minus one",
nargs="+",
)
pltpar.add_argument("-xl", "--xlabel", help="Label of the x axis", type=str)
pltpar.add_argument("-yl", "--ylabel", help="Label of the y axis", type=str, default="")
pltpar.add_argument("-x", "--xlim", help="Set the xaxis limits", nargs=2, type=float)
pltpar.add_argument("-y", "--ylim", help="Set the yaxis limits", nargs=2, type=float)
args = p.parse_args() # parse arguments
if args.headers: # set variables for header mode
args.order = "descending" if args.order[0] == "d" else "right"
args.terminalcols = "auto" if args.terminalcols == 0 else args.terminalcols
else: # set plot variables and configuration
try:
args.xscale
except AttributeError:
setattr(args, "xscale", "linear")
setattr(args, "yscale", "linear")
if len(args.files) != len(args.columns):
args.columns = [args.columns[0]] * len(args.files)
allplots = sum([len(k.split(",")) - 1 for k in args.columns])
if not args.xlabel:
args.xlabel = args.columns[0].split(",")[0]
if args.legprefix:
y_columns = args.columns[0].split(",")[1:]
if len(args.files) == len(args.legprefix) and not args.legend:
args.legend = [pre + lab for pre in args.legprefix for lab in y_columns]
elif len(args.legend) == len(y_columns):
args.legend = [pre + lab for pre in args.legprefix for lab in args.legend]
mpl = (not (args.np) and not (args.pqg)) or args.eps != "noeps"
if mpl: # import and initialize matplotlib
import matplotlib
matplotlib.use("Qt5Agg")
matplotlib.rcParams["lines.linewidth"] = args.linewidth
import matplotlib.font_manager as fnt
import matplotlib.pyplot as plt
if not args.offset:
import matplotlib.ticker as tk
marques = tk.ScalarFormatter(useOffset=False)
colo = (plt.rcParams["axes.prop_cycle"].by_key()["color"]) * 3
if args.colors: # if flag -co present, overwrite default colors
colo[: len(args.colors)] = args.colors
fig = plt.figure(1)
mpl_keys = ("title", "xlim", "xlabel", "xscale", "ylim", "ylabel", "yscale")
axes_kwargs = {
k: args.__dict__[k]
for k in mpl_keys
if k in args.__dict__ and args.__dict__[k]
}
graf = fig.add_subplot(111, **axes_kwargs)
if args.pqg: # import PyQtGraph if specified
import pymesa.plot_tools as pymp
pqgPlot = pymp.pqgCustomPlot(
title=args.title,
xlabel=args.xlabel,
ylabel=args.ylabel,
xrng=args.xlim,
yrng=args.ylim,
xlogscale=args.xscale == "log",
ylogscale=args.yscale == "log",
)
pqgPlot.set_pqgWindow()
colcount = 0 # overall plot counter, used for pqg color
legcount = 0 # legend label counter
fpat = re.compile(
r"(?P<nom>[^/\.]+)\."
) # regular expression to obtain the name of the file without extension
for filecount, doc in enumerate(args.files): # loop over each file
if args.headers: # print headers
hdr, data = pym.read_mesafile(doc)
print(doc)
pym.terminal_print(data.columns, columns=args.terminalcols, order=args.order)
else:
docols = [
col for col in re.split(",", args.columns[filecount])
] # get headers to plot
hdr, data = pym.read_mesafile(doc, usecols=docols)
numplots = len(docols) - 1
if args.legend: # check legend labels
leg = args.legend[legcount : legcount + numplots]
legcount += numplots
else: # if unexistent, set legend labels to corresponent header
leg = [""] * numplots
for numpl, pl in enumerate(docols[1:]):
leg[numpl] = pl
x = data[docols[0]].astype("float") # set x values to first parsed column
for i in range(numplots):
y = data[docols[i + 1]].astype("float")
if args.pqg:
if args.colors:
pqgPlot.plot(x, y, color=args.colors[colcount], label=leg[i])
else:
pqgPlot.plot(x, y, color=(colcount, max(allplots, 9)), label=leg[i])
if mpl:
graf.plot(x, y, color=colo[colcount], label=leg[i])
colcount += 1
if not args.headers:
if mpl:
if not args.offset:
graf.yaxis.set_major_formatter(marques) # set axis marker format
graf.xaxis.set_major_formatter(marques)
graf.grid(True)
graf.legend(
loc="best", prop=fnt.FontProperties(size="medium"), title=args.legtit
)
fig.tight_layout()
if args.eps != "noeps": # save figure
if args.eps != "sieps": # save figure with specified name, extension is checked
seed = fpat.search(args.eps)
if seed:
figname = seed.group("nom") + ".eps"
else:
figname = args.eps + ".eps"
else: # set figure name to input file name
seed = fpat.search(args.files[0])
if seed:
figname = seed.group("nom") + ".eps"
else:
figname = args.files[0] + ".eps"
fig.savefig(figname, format="eps", dpi=1000)
if not args.np:
if args.pqg:
pqgPlot.show_pqgWindow()
else:
plt.show() # show figure with matplotlib
|
gpl-3.0
|
Caoimhinmg/PmagPy
|
programs/di_rot.py
|
3
|
2186
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import range
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import numpy
import pmagpy.pmag as pmag
def main():
"""
NAME
di_rot.py
DESCRIPTION
rotates set of directions to new coordinate system
SYNTAX
di_rot.py [command line options]
OPTIONS
-h prints help message and quits
-f specify input file, default is standard input
-F specify output file, default is standard output
-D D specify Dec of new coordinate system, default is 0
-I I specify Inc of new coordinate system, default is 90
INPUT/OUTPUT
dec inc [space delimited]
"""
D,I=0.,90.
outfile=""
infile=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
infile=sys.argv[ind+1]
data=numpy.loadtxt(infile)
else:
data=numpy.loadtxt(sys.stdin,dtype=numpy.float)
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=sys.argv[ind+1]
out=open(outfile,'w')
if '-D' in sys.argv:
ind=sys.argv.index('-D')
D=float(sys.argv[ind+1])
if '-I' in sys.argv:
ind=sys.argv.index('-I')
I=float(sys.argv[ind+1])
if len(data.shape)>1: # 2-D array
N=data.shape[0]
DipDir,Dip=numpy.ones(N,dtype=numpy.float).transpose()*(D-180.),numpy.ones(N,dtype=numpy.float).transpose()*(90.-I)
data=data.transpose()
data=numpy.array([data[0],data[1],DipDir ,Dip]).transpose()
drot,irot=pmag.dotilt_V(data)
drot=(drot-180.)%360. #
for k in range(N):
if outfile=="":
print('%7.1f %7.1f ' % (drot[k],irot[k]))
else:
out.write('%7.1f %7.1f\n' % (drot[k],irot[k]))
else:
d,i=pmag.dotilt(data[0],data[1],(D-180.),90.-I)
if outfile=="":
print('%7.1f %7.1f ' % ((d-180.)%360.,i))
else:
out.write('%7.1f %7.1f\n' % ((d-180.)%360.,i))
if __name__ == "__main__":
main()
|
bsd-3-clause
|
Sentient07/scikit-learn
|
examples/ensemble/plot_ensemble_oob.py
|
58
|
3265
|
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
|
bsd-3-clause
|
CVML/pymc3
|
pymc3/examples/ARM12_6.py
|
14
|
1715
|
import numpy as np
from pymc3 import *
import pandas as pd
data = pd.read_csv(get_data_file('pymc3.examples', 'data/srrs2.dat'))
cty_data = pd.read_csv(get_data_file('pymc3.examples', 'data/cty.dat'))
data = data[data.state == 'MN']
data['fips'] = data.stfips * 1000 + data.cntyfips
cty_data['fips'] = cty_data.stfips * 1000 + cty_data.ctfips
data['lradon'] = np.log(np.where(data.activity == 0, .1, data.activity))
data = data.merge(cty_data, 'inner', on='fips')
unique = data[['fips']].drop_duplicates()
unique['group'] = np.arange(len(unique))
unique.set_index('fips')
data = data.merge(unique, 'inner', on='fips')
obs_means = data.groupby('fips').lradon.mean()
n = len(obs_means)
lradon = np.array(data.lradon)
floor = np.array(data.floor)
group = np.array(data.group)
model = Model()
with model:
groupmean = Normal('groupmean', 0, 10. ** -2.)
# as recommended by "Prior distributions for variance parameters in
# hierarchical models"
groupsd = Uniform('groupsd', 0, 10.)
sd = Uniform('sd', 0, 10.)
floor_m = Normal('floor_m', 0, 5. ** -2.)
means = Normal('means', groupmean, groupsd ** -2., shape=n)
lr = Normal(
'lr', floor * floor_m + means[group], sd ** -2., observed=lradon)
def run(n=3000):
if n == "short":
n = 50
with model:
start = {'groupmean': obs_means.mean(),
'groupsd_interval': 0,
'sd_interval': 0,
'means': np.array(obs_means),
'floor_m': 0.,
}
start = find_MAP(start, [groupmean, sd, floor_m])
step = NUTS(model.vars, scaling=start)
trace = sample(n, step, start)
if __name__ == '__main__':
run()
|
apache-2.0
|
jLantxa/spectrum
|
prototype/analysis.py
|
1
|
8423
|
# analysis.py
#
# Copyright March 2016, Javier L. Vazquez <[email protected]>
#
# This file is part of SPECTRUM
# Cognitive SDR prototype using BladeRF and the GNU Radio framework.
#
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import numpy as n_pkt
from scipy.signal import butter, filtfilt, freqz
import matplotlib.pyplot as plt
from math import sqrt
from math import log10
import numpy as np
import time
# Returns a list of delta and width values and the number of packets detected
# over the power threshold (th)
def extract_packets(data, th):
last_rising_edge = 0
last_falling_edge = 0
pulse_delta = []
pulse_width = []
pulses = 0
## EXTRACT PACKETS
last_bit = 0
pulse_delta_i = 0
pulse_width_i = 0
for i in range(0, len(data)):
bit = 0
if data[i] >= th:
bit = 1
if bit == 1 and last_bit == 0:
pulse_delta.append(i - last_falling_edge)
pulse_delta_i = pulse_delta_i + 1
last_rising_edge = i
pulses += 1
if bit == 0 and last_bit == 1:
pulse_width.append(i - last_rising_edge)
pulse_width_i = pulse_width_i + 1
last_falling_edge = i
last_bit = bit
# Channel is always full (one packet in whole window)
# The delta and width cannot be estimated because the pulse is longer than
# the observation window.
if pulses == 0 and data[0] >= th:
pulse_delta = [0]
pulse_width = None
pulses = 1
return [pulse_delta, pulse_width, pulses]
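# A minimal sketch of extract_packets() on a hypothetical 7-sample power trace
# with two pulses above the threshold.
def _extract_packets_example():
    toy_trace = [0.1, 0.9, 0.9, 0.1, 0.1, 0.8, 0.1]
    deltas, widths, count = extract_packets(toy_trace, 0.5)
    # deltas -> [1, 2]  (samples from the previous falling edge to each rising edge)
    # widths -> [2, 1]  (samples each pulse stays above the threshold)
    # count  -> 2
    return deltas, widths, count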
# Analyse packet statistics for a given threshold
def packet_statistics(data, th, samp_rate, nsamples, debug=False):
print("Thres=" +str(th))
data_rate = samp_rate / nsamples
[pulse_delta, pulse_width, pulses] = extract_packets(data, th)
# If there is a space between pulses, i.e. if at least one pulse was
# detected which was smaller than the observation window:
if pulses > 1 and pulse_delta is not None:
slots = np.array(pulse_delta)/data_rate
packets = np.array(pulse_width)/data_rate
## STATISTICS
# Averages
# if len(pulse_delta) > 0:
# acc_delta = 0
# for i in range (0, len(pulse_delta)):
# acc_delta = acc_delta + pulse_delta[i]
# pulse_delta_average = (acc_delta / len(pulse_delta)) / data_rate
# if len(pulse_width) > 0:
# acc_width = 0
# for i in range(1, len(pulse_width)):
# acc_width = acc_width + pulse_width[i]
# pulse_width_average = (acc_width / len(pulse_width)) / data_rate
# Histogram
min_slot = 0 if len(slots) == 0 else min(slots)
max_slot = len(data)/data_rate if len(slots) == 0 else max(slots)
min_packet = 0 if len(packets) == 0 else min(packets)
max_packet = len(data)/data_rate if len(packets) == 0 else max(packets)
slots_centers, slots_hist = histogram(slots, 1024, min_slot, max_slot)
packets_centers, packets_hist = histogram(packets, 1024, min_packet, max_packet)
most_frequent_slot = slots_centers[slots_hist.index(max(slots_hist))]
most_frequent_packet = packets_centers[packets_hist.index(max(packets_hist))]
time_busy = sum(packets)
time_idle = sum(slots)
rho = (1.0*time_busy / (time_busy+time_idle)) if (time_busy + time_idle) > 0 else 0
results = {
"n_pkt" : pulses,
"slot" : most_frequent_slot,
"pkt" : most_frequent_packet,
"rho" : rho
}
# Otherwise there was only one long packet of unknown length. Delta is 0,
# occupation is 100%, and the length of the packet is None to indicate
# uncertainty.
else:
results = {
"n_pkt" : 1,
"slot" : 0,
"pkt" : None,
"rho" : 1
}
print(results)
return results
def histogram(data, nbins, p_min, p_max):
delta = float(p_max - p_min)/nbins
hist = list()
centers = list()
for b in range(nbins):
centers.append(p_min + b*delta)
hist.append(0)
for sample in data:
for b in range(nbins):
if sample >= p_min + b*delta and sample < p_min + (b+1)*delta:
hist[b] += 1
return [centers, hist]
# Return a logarithmic scale histogram of power distribution
def log_power_histogram(data, nbins, p_min, p_max):
log_data = list()
for sample in data:
log_data.append(10*log10(sample))
centers, hist = histogram(log_data, nbins, p_min, p_max)
return [centers, hist]
# Return the coefficients of a Butterworth low-pass filter
def butter_lowpass(order, cutoff):
    b, a = butter(order, cutoff, btype='low', analog=False)
    return b, a
# Apply a Butterworth low-pass filter to the data
def butter_lowpass_filter(data, order, cutoff):
b, a = butter_lowpass(order, cutoff)
y = filtfilt(b, a, data)
return y
# Return a list of power thresholds for optimal energy detection.
# This approach can miss in heavily occupied channels: if a power Gaussian
# coming from actual packets is higher than the base noise, it will be
# treated as noise. Recognising that first big Gaussian peak would avoid
# this.
# IMPORTANT: power thresholds are returned in dB
def get_thresholds(data, bins, p_min, p_max, order, cutoff, debug=False):
centers, power_db = log_power_histogram(data, bins, p_min, p_max)
power_db_filt = butter_lowpass_filter(power_db, order, cutoff).tolist()
    # Clamp negative values produced by the filter to zero
for p_ix, p in enumerate(power_db_filt):
if power_db_filt[p_ix] < 0:
power_db_filt[p_ix] = 0
max_ix = power_db_filt.index(max(power_db_filt))
th = list()
    # Local minima of the filtered histogram are threshold candidates
    for n in range(1, len(power_db_filt)-1):
        if power_db_filt[n] < power_db_filt[n-1] and power_db_filt[n] < power_db_filt[n+1]:
th.append(n)
# Locate zero trails and calculate mean point
p_cursor = next(x[0] for x in enumerate(centers) if x[1] > -40)
while p_cursor < len(power_db_filt):
# Find first zero
if power_db_filt[p_cursor] != 0:
p_cursor += 1
continue
in_fz = p_cursor
# Find end of trail
for in_lz in range(in_fz, len(power_db_filt)):
# Found a zero
if power_db_filt[in_lz] == 0:
continue
            # Trail is only one bin long, so it is discarded
if (in_lz - in_fz) < 2:
p_cursor = in_lz + 1
break
# Trail end found. Calculate mean point and append to the list
new_th = int((in_fz + in_lz)/2)
th.append(new_th)
p_cursor = in_lz + 1
break
        # This break is reached only if the inner loop runs to the end.
        # Without it the algorithm would loop forever when the last samples
        # form a trail of zeros, since the cursor is not updated after the loop.
break
# Convert th from histogram index to power
p_th = list()
for t in th:
p_th.append(centers[t])
p_th.sort()
if debug:
plt.figure("Power histogram")
plt.clf()
plt.title("Power thresholds")
plt.xlabel("Power [dB]")
plt.plot(centers, power_db)
plt.plot(centers, power_db_filt)
plt.axvline(p_th[0])
plt.draw()
plt.pause(0.0001)
return p_th
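# Hypothetical usage sketch (illustration only, not part of the original
# module): estimate detection thresholds from a bimodal power trace built
# from a noise floor plus occasional packets. The filter order and cutoff
# below are assumptions, not tuned values.
def _demo_get_thresholds():
    noise = 10 ** (np.random.normal(-90, 2, 9000) / 10.0)    # ~-90 dB noise
    packets = 10 ** (np.random.normal(-60, 2, 1000) / 10.0)  # ~-60 dB packets
    samples = np.concatenate([noise, packets])
    # The returned thresholds (in dB) should fall in the valley between the
    # two modes of the power histogram.
    return get_thresholds(samples, bins=1024, p_min=-110, p_max=-30,
                          order=3, cutoff=0.05)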
# Return geometric mean of a vector
# The mean is calculated first in decibels
def get_geometric_mean(data):
acc = 10*log10(data[0])
for elem in data[1:]:
acc += 10*log10(elem)
mean_db = acc / len(data)
mean = 10**(mean_db/10.0)
return mean
def get_histogram_max(data, bins, p_min, p_max):
centers, power_db = log_power_histogram(data, bins, p_min, p_max)
max_hist = centers[power_db.index(max(power_db))]
print("MAX = " + str(max_hist))
return max_hist
|
gpl-3.0
|
smartscheduling/scikit-learn-categorical-tree
|
examples/mixture/plot_gmm_classifier.py
|
250
|
3918
|
"""
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
    # Draw one ellipse per mixture component, oriented and scaled by the
    # eigendecomposition of its 2D-projected covariance matrix.
    for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
    h = plt.subplot(2, n_classifiers // 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
anderson1008/NOCulator
|
hring/src/Script/slowdown_estimation.py
|
1
|
10892
|
#!/usr/bin/python
import sys
import os
import re
import fnmatch
import string
import get
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import FuncFormatter
from math import log, exp
# slowdown_estimation
ipc_alone_error_accum = 0
NODE = 0
num_app = 26
spec_workload = ["400.perlbench.bin.gz ","401.bzip2.bin.gz ","403.gcc.bin.gz ","429.mcf.bin.gz ","433.milc.bin.gz ","435.gromacs.bin.gz ","436.cactusADM.bin.gz ",\
"437.leslie3d.bin.gz ","444.namd.bin.gz ","445.gobmk.bin.gz ","447.dealII.bin.gz ","450.soplex.bin.gz ","453.povray.bin.gz ","454.calculix.bin.gz ",\
"456.hmmer.bin.gz ","458.sjeng.bin.gz ","459.GemsFDTD.bin.gz ","462.libquantum.bin.gz ","464.h264ref.bin.gz ","465.tonto.bin.gz ","470.lbm.bin.gz ",\
"471.omnetpp.bin.gz ","473.astar.bin.gz ","481.wrf.bin.gz ","482.sphinx3.bin.gz ","483.xalancbmk.bin.gz "]
error_rate_per_app = []
for i in range (num_app):
error_rate_per_app.append([])
error_rate_count = [0] * num_app
plt_error = []
total_num_of_sample_point = 0
def compare (s1, s2):
remove = string.whitespace
return s1.translate(None, remove) == s2.translate (None, remove)
# compute the geometric-average error rate for each application
def compute (error_per_app, error_count):
print "--------------------------- " + network_size + " -------------------------------"
print "*********** The error rate of each application **********"
geo_avg_error_per_app = [0] * num_app
for j in range (0, num_app, 1):
if error_count[j] != 0:
geo_avg_error_per_app [j] = cmp_geo_avg(error_per_app[j])
#avg_error_per_app[j] = error_sum[j] / error_count[j]
workload_out = re.search(r'\d+\.(\w+)',spec_workload[j])
print workload_out.group(1).ljust(30) + str("%.4f" % geo_avg_error_per_app[j])
print "--------------------------------------------------------------------------------"
def compute_error_rate (num_file):
global raw_out_dir
global workload
global ref_ipc
global NODE
global error_rate_count
global error_rate_per_app
global ipc_alone_error_accum
global plt_error
global total_num_of_sample_point
slowdown_error_raw = []
slowdown_error_accum = 0
for sim_index in range (1, num_file + 1, 1):
raw_out_file_name = "sim_" + str(sim_index) + ".out"
for file in os.listdir(raw_out_dir):
if fnmatch.fnmatch(file, raw_out_file_name):
fo_in = open(raw_out_dir + file, "r")
content = fo_in.read()
fo_in.close()
insns_persrc = get.get_insns_persrc (content)
active_cycles = get.get_active_cycles (content)
non_overlap_penalty = get.get_non_overlap_penalty (content)
workload_array = re.split('[ ]', workload[sim_index])
ipc_ref = re.split('[ ]',ref_ipc[sim_index])
for i in range (0, NODE, 1):
est_ipc_alone = float(insns_persrc[i]) / (int(active_cycles[i]) - int(non_overlap_penalty[i]))
ipc_alone_error = (est_ipc_alone - float(ipc_ref[i])) / float(ipc_ref[i])
#print ipc_alone_error
ipc_alone_error_accum = ipc_alone_error_accum + abs(ipc_alone_error)
ipc_share = float(insns_persrc[i]) / int(active_cycles[i])
est_slowdown = est_ipc_alone / ipc_share
actual_slowdown = float(ipc_ref[i]) / ipc_share
#print actual_slowdown
slowdown_error = (est_slowdown - actual_slowdown) / actual_slowdown
#print slowdown_error
# slowdown error distribution profiling
plt_error = plt_error + [abs(slowdown_error)]
slowdown_error_raw = slowdown_error_raw + [abs(slowdown_error)]
slowdown_error_accum = slowdown_error_accum + abs(slowdown_error)
total_num_of_sample_point = total_num_of_sample_point + 1
for j in range (0, num_app, 1):
if compare (workload_array [i], spec_workload[j]):
error_rate_per_app [j] = [abs(slowdown_error)] + error_rate_per_app [j]
error_rate_count [j] = error_rate_count [j] + 1
return [slowdown_error_raw, slowdown_error_accum]
network_size = ''
NODE = -1
def cmd_input ():
# getting the input
#input_workload = raw_input('please input workload (homo_mem, hetero): ' )
global network_size
global NODE
network_size = raw_input('please input network size (4x4, 8x8):' )
if network_size == "4x4":
NODE = 16
elif network_size == "8x8":
NODE = 64
    if NODE <= 0:
        raise Exception("Size of network is undefined")
def comp_avg_error (num_file):
# compute the average error
global input_workload
global number_file
global workload_dir
global raw_out_dir
global workload
global ref_ipc
ipc_ref_file_name = workload_dir + "workload_list/" + input_workload + "_" + network_size + "_ipc"
workload_file_name = workload_dir + "workload_list/" + input_workload + "_" + network_size
raw_out_dir = workload_dir + "/" + input_workload + "/" + network_size + "/baseline/"
ref_ipc_file = open (ipc_ref_file_name)
ref_ipc = ref_ipc_file.readlines()
ref_ipc_file.close()
workload_file = open (workload_file_name)
workload = workload_file.readlines()
workload_file.close()
[error_rate_raw, error_rate_sum] = compute_error_rate(num_file)
return [error_rate_raw, error_rate_sum]
def to_percent(y, position):
# Ignore the passed in position. This has the effect of scaling the default
# tick locations.
s = str(100 * y)
# The percent symbol needs escaping in latex
if matplotlib.rcParams['text.usetex'] is True:
return s + r'$\%$'
else:
return s + '%'
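# Hypothetical usage sketch (illustration only): to_percent is intended to be
# hooked into matplotlib through the FuncFormatter imported above, e.g. to
# label a normalised histogram axis as percentages. The axis argument name is
# an assumption.
def _demo_percent_axis(ax):
    ax.yaxis.set_major_formatter(FuncFormatter(to_percent))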
def cmp_geo_avg (error_raw):
# error rate has to be an array or list
new_error_raw = [log(x) for x in error_raw]
return exp(sum (new_error_raw)/len(new_error_raw))
#cmd_input()
network_size = "4x4"
NODE =16
input_workload = "random"
num_file_0 = 30
workload_dir = "/Users/xiyuexiang/Desktop/SlowdownError/"
#print "Going to simulate " + input_workload + " workload (file counts) : " + str(num_file_0)
[error_raw_0, error_sum_0] = comp_avg_error(num_file_0)
avg_slowdown_error = error_sum_0 / num_file_0 / NODE
geo_avg_slowdown_error = cmp_geo_avg (error_raw_0)
#print "********** Average Error Rate of " + input_workload + " (Low network intensity)************ \n%.4f\n" % avg_slowdown_error
print "********** Average Geometric Error Rate of " + input_workload + " (Low network intensity, 4x4)************ \n%.4f" % geo_avg_slowdown_error
input_workload = "homo_mem"
num_file_1 = 30
workload_dir = "/Users/xiyuexiang/Desktop/SlowdownError/"
#print "Go toing simulate " + input_workload + " workload (file counts) : " + str(num_file_1)
[error_raw_1, error_sum_1] = comp_avg_error(num_file_1)
avg_slowdown_error = error_sum_1 / num_file_1 / NODE
geo_avg_slowdown_error = cmp_geo_avg (error_raw_1)
#print "********** Average Error Rate of " + input_workload + " (High network intensity)******** \n%.4f\n" % avg_slowdown_error
print "********** Average Geometric Error Rate of " + input_workload + " (High network intensity, 4x4)************ \n%.4f" % geo_avg_slowdown_error
input_workload = "hetero"
num_file_2 = 30
workload_dir = "/Users/xiyuexiang/Desktop/SlowdownError/"
#print "Go toing simulate " + input_workload + " workload (file counts) : " + str(num_file_2)
[error_raw_2,error_sum_2] = comp_avg_error(num_file_2)
avg_slowdown_error = error_sum_2 / num_file_2 / NODE
geo_avg_slowdown_error = cmp_geo_avg (error_raw_2)
#print "******** Average Error Rate of " + input_workload + " (Medium network intensity) ******** \n%.4f\n" % avg_slowdown_error
print "********** Average Geometric Error Rate of " + input_workload + " (Medium network intensity, 4x4)************ \n%.4f" % geo_avg_slowdown_error
overall_error_raw = error_raw_0 + error_raw_1 + error_raw_2
overall_geo_avg = cmp_geo_avg (overall_error_raw)
print str("********** The overall geometric average error rate *************\n%.4f" % overall_geo_avg)
print "-------------------------------------------------------------------------------------------------"
compute (error_rate_per_app, error_rate_count)
network_size = "8x8"
NODE = 64
error_rate_count = [0] * num_app
error_rate_per_app = []
for i in range (num_app):
error_rate_per_app.append([])
input_workload = "random"
num_file_0 = 30
workload_dir = "/Users/xiyuexiang/Desktop/SlowdownError/"
#print "Going to simulate " + input_workload + " workload (file counts) : " + str(num_file_0)
[error_raw_0, error_sum_0] = comp_avg_error(num_file_0)
avg_slowdown_error = error_sum_0 / num_file_0 / NODE
geo_avg_slowdown_error = cmp_geo_avg (error_raw_0)
#print "********** Average Error Rate of " + input_workload + " (Low network intensity)************ \n%.4f\n" % avg_slowdown_error
print "********** Average Geometric Error Rate of " + input_workload + " (Low network intensity, 8x8)************ \n%.4f" % geo_avg_slowdown_error
input_workload = "homo_mem"
num_file_1 = 30
workload_dir = "/Users/xiyuexiang/Desktop/SlowdownError/"
#print "Go toing simulate " + input_workload + " workload (file counts) : " + str(num_file_1)
[error_raw_1, error_sum_1] = comp_avg_error(num_file_1)
avg_slowdown_error = error_sum_1 / num_file_1 / NODE
geo_avg_slowdown_error = cmp_geo_avg (error_raw_1)
#print "********** Average Error Rate of " + input_workload + " (High network intensity)******** \n%.4f\n" % avg_slowdown_error
print "********** Average Geometric Error Rate of " + input_workload + " (High network intensity, 8x8)************ \n%.4f" % geo_avg_slowdown_error
input_workload = "hetero"
num_file_2 = 30
workload_dir = "/Users/xiyuexiang/Desktop/SlowdownError/"
#print "Go toing simulate " + input_workload + " workload (file counts) : " + str(num_file_2)
[error_raw_2,error_sum_2] = comp_avg_error(num_file_2)
avg_slowdown_error = error_sum_2 / num_file_2 / NODE
geo_avg_slowdown_error = cmp_geo_avg (error_raw_2)
#print "******** Average Error Rate of " + input_workload + " (Medium network intensity) ******** \n%.4f\n" % avg_slowdown_error
print "********** Average Geometric Error Rate of " + input_workload + " (Medium network intensity, 8x8)************ \n%.4f" % geo_avg_slowdown_error
print "---------------------------------------------------------------------------------------------------"
overall_error_raw = error_raw_0 + error_raw_1 + error_raw_2
overall_geo_avg = cmp_geo_avg (overall_error_raw)
print str("********** The overall geometric average error rate *************\n%.4f" % overall_geo_avg)
print "---------------------------------------------------------------------------------------------------"
# compute the error rate of each application
compute (error_rate_per_app, error_rate_count)
frequency, num_bin, patches = plt.hist(plt_error,bins=20)
plt.show()
#print frequency
#print num_bin
#print patches
|
mit
|
krez13/scikit-learn
|
examples/neighbors/plot_classification.py
|
287
|
1790
|
"""
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
|
bsd-3-clause
|
vigilv/scikit-learn
|
sklearn/ensemble/partial_dependence.py
|
251
|
15097
|
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
            # feature has low resolution: use unique values
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
|
bsd-3-clause
|
kjung/scikit-learn
|
examples/model_selection/grid_search_text_feature_extraction.py
|
99
|
4163
|
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
|
bsd-3-clause
|
adrn/streams
|
scripts/phase-hack.py
|
1
|
1859
|
# coding: utf-8
""" TriAnd RR Lyrae """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os, sys
from datetime import datetime, timedelta, time, date
# Third-party
import astropy.coordinates as coord
import astropy.units as u
from astropy.io import ascii
from astropy.table import Column
from astropy.time import Time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Project
from streams.coordinates import sex_to_dec
from streams.observation.time import gmst_to_utc, lmst_to_gmst
from streams.observation.rrlyrae import time_to_phase, phase_to_time
from streams.observation.triand import all_stars, triand_stars, standards
from streams.util import project_root
# CHANGE THIS
kitt_peak_longitude = (111. + 35/60. + 40.9/3600)*u.deg
def main():
tbl = ascii.read("/Users/adrian/Downloads/jdlist.txt", delimiter=",",
names=["file_name", "object_name", "jd"])
phase_data = np.zeros(len(tbl))*np.nan
for i,line in enumerate(tbl):
t = Time(line['jd'], format='jd', scale='utc')
try:
object_name = line['object_name'].strip().replace(" ", "_")
this_star = filter(lambda s: s['name'] == object_name, all_stars)[0]
except IndexError:
print("Skipping {}...".format(object_name))
continue
except AttributeError:
print("No name in this line!")
continue
period = this_star['period']*u.day
t0 = Time(this_star['rhjd0'], scale='utc', format='mjd')
phase = time_to_phase(t, period=period, t0=t0)
phase_data[i] = phase
col = Column(phase_data,"phase")
tbl.add_column(col)
ascii.write(tbl, "/Users/adrian/Downloads/jdlist-with-phase.txt", delimiter=",")
if __name__ == "__main__":
main()
|
mit
|
hande-qmc/hande
|
tools/pyhande/pyhande/error_analysing/find_starting_iteration.py
|
1
|
13890
|
"""Functions to find starting iteration for analysis."""
from typing import Dict, List
import warnings
import math
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pyblock
from pyhande.helpers.simple_callables import RaiseValueError
def _show_starting_iterations_graph(
data: pd.DataFrame, it_key: str, col_to_show: str,
starting_it: int) -> None:
"""
Show a plot of data of specified column with starting iteration.
Parameters
----------
data : pd.DataFrame
Data where starting iteration was found.
it_key : str
Key of iteration column.
col_to_show : str
Key of column to plot here.
starting_it : int
Suggested/found starting iteration.
"""
plt.xlabel(it_key)
plt.ylabel(col_to_show)
plt.plot(data[it_key], data[col_to_show], 'b-', label='data')
plt.axvline(starting_it, color='r',
label='Suggested starting iteration')
plt.legend(loc='best')
plt.show()
def _get_blocking_loss(start_ind: int, data: pd.DataFrame) -> pd.Series:
"""
Get blocking loss which is to be minimised.
Loss here is defined as the fractional standard error over the
square root of the number of data points used for the blocking.
Parameters
----------
start_ind : int
Index where blocking will start. The blocking loss of that
analysis will be found.
data : pd.DataFrame
Data to be blocked, only including columns that are blocked.
Returns
-------
pd.Series
Index consists of the columns in `data` considered when finding
starting iteration. Values are the losses for each column
respectively. If loss calculation fails, respective value is
NaN.
"""
(_, reblock, _) = pyblock.pd_utils.reblock(data.iloc[start_ind:])
opt_block = pyblock.pd_utils.reblock_summary(reblock)
try:
success = all([col in opt_block.index for col in data.columns])
series = False
except AttributeError:
# data is a Series, not DataFrame.
success = data.name in opt_block.index
series = True
if success:
loss = ((opt_block['standard error error'] /
opt_block['standard error']) /
math.sqrt(float(len(data.iloc[start_ind:]))))
if not loss.isna().any():
return loss.astype('float64')
# 'nan' values can easily be ignored by _grid_search.
if series:
return pd.Series([float('nan')], index=[data.name])
return pd.Series([float('nan')]*len(data.columns), index=data.columns)
def _grid_search(data: pd.DataFrame, grid_size: int, min_ind: int,
max_ind: int) -> int:
"""
Do log adaptive grid search between `min_ind` and `max_ind`.
Parameters
----------
data : pd.DataFrame
Data to be blocked, only including columns that are blocked.
grid_size : int
Number of logarithmically spaced grid points per run.
min_ind : int
Minimum value of to be found `start_ind`.
max_ind : int
Maximum value of to be found `start_ind`.
Returns
-------
int
Found `start_ind`, index where blocking will start.
"""
while grid_size > 2:
grid_pts = np.logspace(
np.log10(min_ind), np.log10(int(max_ind)), grid_size)
# The grid points correspond to possible - discrete - start
# indices so resolution is 1 at best.
while int(grid_pts[0]) == int(grid_pts[1]) and grid_size > 2:
grid_pts = np.delete(grid_pts, 0)
grid_size -= 1
if int(grid_pts[0]) == int(grid_pts[1]) and grid_size == 2:
# Only two grid points remain and they are identical.
# Can stop looping and return their value.
return int(grid_pts[0])
losses = pd.concat(
[_get_blocking_loss(int(grid_pt), data) for grid_pt in grid_pts],
keys=list(map(int, grid_pts)), axis=1)
# idxmin(axis=1) will compare values in each row, giving the
# losses column name where the minimum is. The losses column
# names correspond to the grid points. Of those, we want the
# lowest grid point (.min()).
poss_min = losses.idxmin(axis=1).min()
# Find next minimum if the one above is ignored. Of the grid
# point found, select highest to be most conservative.
try:
poss_max = losses.drop(columns=poss_min).idxmin(axis=1).max()
except KeyError:
raise RuntimeError("Failed to find starting iteration. "
"There might not be enough data.")
# Sort.
poss_min, poss_max = ((poss_min, poss_max) if poss_min < poss_max
else (poss_max, poss_min))
min_ind = max(min_ind, poss_min)
max_ind = min(max_ind, poss_max)
if min_ind == int(grid_pts[0]) and max_ind == int(grid_pts[-1]):
break
return max_ind
def find_starting_iteration_blocking(
data: pd.DataFrame, end_it: int, it_key: str, cols: List[str],
hybrid_col: str, start_max_frac: float = 0.8,
grid_size: int = 10, number_of_reblocks_to_cut_off: int = 1,
show_graph: bool = False) -> int:
"""
Find the best iteration to start analysing CCMC/FCIQMC data.
It first excludes data before not all data in all columns specified
in `cols` are varying and after `end_it`. Then it searches for the
starting iteration using an adaptive grid search on a log scale
since we assume that the starting iteration is closer to the
beginning than the end of the available data. During the search, a
loss function is minimised. The loss is the fractional error over
number of data involved in the blocking for each data column in
`cols`.
This implementation is based on an older version in pyhande/lazy.py.
V. A. Neufeld thanks the EPSRC CDT CMMS cohort 1 in Cambridge for
helpful discussions.
.. warning::
Use with caution, check whether output is sensible and adjust
parameters if necessary.
Parameters
----------
data : pd.DataFrame
QMC data, e.g. as extracted by extract.py. Has to contain
columns with key `it_key` and columns in `cols`, used for
blocking.
end_it : int
Last iteration to be considered in blocking.
it_key : str
Key of column containing MC iterations.
cols : List[str]
List of keys of columns involved in blocking.
hybrid_col : str
Ignored here, for common interface.
start_max_frac : float, optional
The start iterations found has to be in the first
`start_max_frac` fraction of the data between
the point where all columns in `cols` have started varying and
`end_it`. This prevents finding a starting iteration too close
to the end. Has to be between 0.00001 and 1.0.
The default is 0.8.
grid_size : int, optional
Number of logarithmically spaced grid points per run.
The default is 10.
number_of_reblocks_to_cut_off : int, optional
To be extra sure, cut off a few reblocks to make sure data after
starting iteration is truly in equilibrium. Cannot be negative.
The default is 1.
show_graph : bool, optional
If True, show a graph showing the columns with key `cols[0]` as
a function of iterations. The suggested starting iteration is
highlighted. The default is False.
Raises
------
ValueError
If `start_max_frac` or
`number_of_reblocks_to_cut_off` are out of range.
RuntimeError
If not all columns with keys in `cols` have started varying in
`data` or if suitable starting iteration was not found.
Returns
-------
int
        Suggested iteration in column `it_key` from which analysis should
start.
"""
# Check some inputs.
if (start_max_frac < 0.00001 or
start_max_frac > 1.0):
raise ValueError("0.00001 < start_max_frac < 1 not "
"satisfied!")
if number_of_reblocks_to_cut_off < 0:
raise ValueError("'number_of_reblocks_to_cut_off' can't be negative!")
# Data cleaning.
    # Remove iterations past the specified end iteration and make
    # sure all cols have started varying in the dataset, excluding data
    # from before that point.
data = data[data[it_key] <= end_it]
max_varying_it = 0
for col in cols:
if data[data[col] != data[col].iloc[0]].empty:
raise RuntimeError(f"{col} has not started varying in considered "
"dataset.")
max_varying_it = max(
max_varying_it,
data[data[col] != data[col].iloc[0]][it_key].iloc[0])
data = data[data[it_key] >= max_varying_it]
# Finding starting iteration.
# Do grid search to find the index of the starting iteration.
start_ind = _grid_search(
data[cols], grid_size, 1, int(start_max_frac*len(data)) + 1)
# Search has failed if index is too close to the end.
if start_ind > int(start_max_frac*len(data)):
raise RuntimeError("Failed to find starting iteration. "
"Found starting iteration too close to end. "
"Possibly need more data.")
# Discarding number_of_reblocks_to_cut_off reblocks.
(_, reblock, _) = pyblock.pd_utils.reblock(data[cols].iloc[start_ind:])
opt_ind = pyblock.pd_utils.optimal_block(reblock)
discard_indx = 2**opt_ind * number_of_reblocks_to_cut_off
# Converting to iteration.
if start_ind + discard_indx > len(data) - 1:
raise RuntimeError("Failed to find starting iteration. "
"Tried to remove the number of reblocks and "
"left with no data.")
starting_it = data[it_key].iloc[start_ind + discard_indx]
# Show plot if desired, aiding judgment whether to trust estimate.
if show_graph:
        # Note that the non-varying phase of the data has been cut off!
_show_starting_iterations_graph(data, it_key, cols[0], starting_it)
return starting_it
def find_starting_iteration_mser_min(
data: pd.DataFrame, end_it: int, it_key: str, cols: List[str],
hybrid_col: str, start_max_frac: float = 0.84,
n_blocks: int = 100) -> int:
r'''Estimate starting iteration with MSER minimization scheme.
.. warning::
Use with caution, check whether output is sensible and adjust
parameters if necessary.
    This function gives an optimal estimation of the starting
    iterations based on MSER minimization heuristics.
    This method decides the starting iteration :math:`d` by minimizing
    the evaluation function
    MSER(:math:`d`) =
    :math:`\Sigma_{i=1}^{n-d} ( X_{i+d} - X_{mean}(d) )^2 / (n-d)^2`.
Here, :math:`n` is length of time-series, :math:`X_i` is
`eval_ratio['num']` / `eval_ratio['denom']` of :math:`i`-th step,
and :math:`X_{mean}` is the average of :math:`X_i` after the
:math:`d`-th step.
This is a reformatted and altered version of a previous
implementation in lazy.py by Tom Ichibha.
See Ichibha, T., Hongo, K., Maezono, R., Thom, A. J. W., 2019
arXiv:1904.09934 [physics.comp-ph]
Parameters
----------
data : :class:`pandas.DataFrame`
Calculation output of a FCIQMC or CCMC calculation.
end_it : int
Last iteration to be considered in blocking.
it_key : str
Key of column containing MC iterations.
cols : List[str]
Ignored here. Keep for common interface.
hybrid_col: str
Column in data to be analysed here, e.g. 'Inst. Proj. Energy'.
start_max_frac : float
        MSER(:math:`d`) may oscillate or become unreasonably small
        when :math:`n-d` is small. Thus, we calculate MSER(:math:`d`)
for :math:`d` < (:math:`n` * start_max_frac) and
give the optimal estimation of the starting iterations
only in this range of :math:`d`.
The default is 0.84.
n_blocks : int
        This analysis takes a long time when :math:`n` is large.
        Thus, we only evaluate MSER(:math:`d`) at `n_blocks` equally
        spaced candidate values of :math:`d` and decide the optimal
        estimation of the starting iteration from these alone.
The default is 100.
Returns
-------
starting_it: int
Iteration from which to start reblocking analysis for this
calculation.
'''
data = data[data[it_key] <= end_it]
inst_ratio = data[hybrid_col]
mser_min = float('inf')
for i in range(n_blocks):
start_ind = int(i*(len(inst_ratio)*start_max_frac)/n_blocks)
mser = (np.var(inst_ratio.iloc[start_ind:len(inst_ratio)]) /
(len(inst_ratio)-start_ind))
if mser < mser_min:
mser_min = mser
starting_it = data[it_key].iloc[start_ind]
final_start_ind = start_ind
if final_start_ind > len(inst_ratio)*(start_max_frac**2):
warnings.warn(
f"Instantaneous ratio '{hybrid_col}' may not be "
"converged. MSER min. may underestimate the starting iteration. "
"Check!")
return starting_it
def select_find_start(key: str):
"""Select find_starting_iteration function to use.
Parameters
----------
key : str
Key linked to find_starting_iteration.
Returns
-------
Find_starting_iteration function.
"""
return {'blocking': find_starting_iteration_blocking,
'mser': find_starting_iteration_mser_min}.get(
key, RaiseValueError("The find start iteration selected in "
f"'start_its', '{key}', is not "
"available!"))
|
lgpl-2.1
|
msrconsulting/atm-py
|
atmPy/for_removal/UHSAS/UHSAS.py
|
6
|
8384
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 10 11:43:10 2014
@author: htelg
"""
import datetime
import warnings
from io import StringIO as io
import numpy as np
import pandas as pd
import pylab as plt
from scipy.interpolate import UnivariateSpline
from atmPy.atmos import timeseries
from atmPy.aerosols.size_distr import sizedistribution
def read_csv(fname, norm2time = True, norm2flow = True):
uhsas_file_types = ['.xls']
first = True
if type(fname).__name__ == 'list':
for file in fname:
for i in uhsas_file_types:
if i in file:
right_file_format = True
else:
right_file_format = False
if right_file_format:
sdt, hkt= _read_csv(file, norm2time = norm2time, norm2flow = norm2flow)
if first:
sd = sdt.copy()
hk = hkt.copy()
first = False
else:
if not np.array_equal(sd.bincenters, sdt.bincenters):
txt = 'the bincenters changed between files! No good!'
raise ValueError(txt)
sd.data = pd.concat((sd.data,sdt.data))
hk.data = pd.concat((hk.data,hkt.data))
if first:
txt = """Either the prvided list of names is empty, the files are empty, or none of the file names end on
the required ending (*.xls)"""
raise ValueError(txt)
else:
sd, hk= _read_csv(fname, norm2time = norm2time, norm2flow = norm2flow)
return sd, hk
def _read_csv(fname, norm2time = True, norm2flow = True):
uhsas = _readFromFakeXLS(fname)
# return uhsas
sd,hk = _separate_sizedist_and_housekeep(uhsas, norm2time = norm2time, norm2flow = norm2flow)
hk = timeseries.TimeSeries(hk)
# return size_distr,hk
bins = _get_bins(sd)
# return bins
dist = sizedistribution.SizeDist_TS(sd, bins, "numberConcentration")
return dist, hk
def _readFromFakeXLS(fname):
"""reads and shapes a XLS file produced by the uhsas instrument"""
fr = pd.read_csv(fname, sep='\t')
newcolname = [fr.columns[e] + ' ' + str(fr.values[0][e]) for e, i in enumerate(fr.columns)]
fr.columns = newcolname
fr = fr.drop(fr.index[0])
bla = pd.Series(fr['Date -'].values + ' ' + fr['Time -'].values)
# return bla
try:
fr.index = bla.map(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y %H:%M:%S.%f'))
except ValueError:
fr.index = bla.map(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y %I:%M:%S.%f %p'))
fr = fr.drop(['Date -', 'Time -'], axis=1)
return fr
def _separate_sizedist_and_housekeep(uhsas, norm2time = True, norm2flow = True):
"""Beside separating size distribution and housekeeping this
function also converts the data to a numberconcentration (#/cc)
Parameters
----------
uhsas: pandas.DataFrame"""
# size_distr = uhsas.copy()
# hk = uhsas.copy()
# # return size_distr,hk
first = False
for e,col in enumerate(uhsas.columns):
cola = col.split(' ')[0]
try:
float(cola)
float(col.split(' ')[1])
except ValueError:
continue
else:
last = e
if not first:
first = e
# k = size_distr.keys()
# where = np.argwhere(k == 'Valve 0=bypass') + 1
hk = uhsas.iloc[:,:first]
sd = uhsas.iloc[:,first:last+1]
# khk = k[: first]
# size_distr = size_distr.drop(khk, axis=1)
# hsd = k[where:]
# hk = hk.drop(hsd, axis=1)
# return size_distr,hk
hk['Sample sccm'] = hk['Sample sccm'].astype(float)
hk['Accum. Secs'] = hk['Accum. Secs'].astype(float)
# normalize to time and flow
if norm2time:
sd = sd.mul(1 / hk['Accum. Secs'], axis = 0 )
if norm2flow:
sd = sd.mul(60./hk['Sample sccm'], axis = 0 )
return sd,hk
def _get_bins(frame, log=False):
"""
get the bins from the column labels of the size distribution DataFrame.
"""
frame = frame.copy()
bins = np.zeros(frame.keys().shape[0]+1)
for e, i in enumerate(frame.keys()):
bin_s, bin_e = i.split(' ')
bin_s = float(bin_s)
bin_e = float(bin_e)
bins[e] = bin_s
bins[e+1] = bin_e
return bins #binCenters
def _string2dataframe(data):
sb = io(data)
dataFrame = pd.read_csv(sb,
# sep=' ',
names=('d', 'bin_no')
).sort('d')
return dataFrame
def read_calibration_fromString(data):
'''
unit of diameter must be nm
e.g.:
data = """120., 19.5
130., 22.5
140., 25
150., 27.6
173., 33.
200., 38.
233., 43.4
270., 47.5
315., 53.
365., 58.
420., 62.5
490., 67.
570., 71.
660., 75.
770., 78.
890., 79.
1040., 84."""
'''
dataFrame = _string2dataframe(data)
# return dataFrame
calibrationInstance = calibration(dataFrame)
return calibrationInstance
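# Hypothetical usage sketch (illustration only): build a calibration from the
# string format shown in the docstring above and apply it to a size
# distribution. The 'dist' argument is assumed to be a SizeDist_TS as
# returned by read_csv.
def _demo_calibration(data_str, dist):
    cal = read_calibration_fromString(data_str)
    cal.plot_calibration()
    return cal.apply_on(dist, limit_to_cal_range=True)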
class calibration:
def __init__(self,dataTabel):
self.data = dataTabel
self.calibrationFunction = self.get_calibrationFunctionSpline()
def save_csv(self,fname):
# save_Calibration(self,fname)
self.data.to_csv(fname, index = False)
return
def get_calibrationFunctionSpline(self, fitOrder=1):
"""
        Performs a spline fit/smoothing (scipy.interpolate.UnivariateSpline) of d over amp (yes, this way round, not the other way around).
        Returns (generates): creates a function self.spline which can later be used to calculate d from amp.
        Optional Parameters:
        \t s: int - order of the spline function
        \t noOfPts: int - length of generated graph
        \t plot: boolean - whether the result should be plotted
"""
        # The following two-step method is necessary to get a smooth curve.
        # When I only do the second step on the cal_curve I get some weird wiggles.
##### First Step
if (self.data.bin_no.values[1:]-self.data.bin_no.values[:-1]).min() < 0:
            warnings.warn('The data represent a non-injective function! This will not work. Plot the calibration to see what I mean.')
sf = UnivariateSpline(self.data.d.values, self.data.bin_no.values, s=fitOrder)
d = np.logspace(np.log10(self.data.d.values.min()), np.log10(self.data.d.values.max()), 500)
bin_no = sf(d)
# second step
cal_function = UnivariateSpline(bin_no, d, s=fitOrder)
return cal_function
def plot_calibration(self):
"""Plots the calibration function and data
Arguments
------------
cal: calibration instance
Returns
------------
figure
axes
calibration data graph
calibration function graph
"""
cal_function = self.calibrationFunction
bin_no = np.logspace(np.log10(self.data.bin_no.min()), np.log10(self.data.bin_no.max()), 500)
d = cal_function(bin_no)
f, a = plt.subplots()
cal_data, = a.plot(self.data.d, self.data.bin_no, 'o', label='data',)
cal_func, = a.plot(d, bin_no, label='function')
a.loglog()
a.set_xlim(0.9*self.data.d.min(), 1.1*self.data.d.max())
a.set_xlabel('Diameter (nm)')
a.set_ylim(0.9*self.data.bin_no.min(), 1.1*self.data.bin_no.max())
a.set_ylabel('bin number')
a.set_title('Calibration curve')
a.legend(loc = 2)
return f, a, cal_data, cal_func
def apply_on(self, dist, limit_to_cal_range = True):
dist_t = dist.copy()
bins_no = np.arange(dist_t.bins.shape[0])
cal_f = self.get_calibrationFunctionSpline()
new_d = cal_f(bins_no)
df = pd.DataFrame(np.array([bins_no, new_d]).transpose(), columns = ['bin_no','d'])
dist_t.bins = new_d
start_d = self.data.d.iloc[0]
end_d = self.data.d.iloc[-1]
if limit_to_cal_range:
dist_t = dist_t.zoom_diameter(start = start_d, end=end_d)
return dist_t
|
mit
|
ina-foss/ID-Fits
|
lib/unstable/fisher_vector.py
|
1
|
7643
|
# ID-Fits
# Copyright (c) 2015 Institut National de l'Audiovisuel, INA, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import sys
import cv2
import random
import argparse
import numpy as np
from utils.file_manager import pickleLoad, pickleSave
from utils.parallelize import parallelize
from learning.gmm import GMM
from dataset import *
from PythonWrapper.descriptors import *
from sklearn.decomposition import PCA
from yael import yael
def computeDenseDescriptor(image, cell_size=24, step=2, scales=5, scale_factor=1.41, pca=None, embed_spatial_information=False):
descriptor = LbpDescriptor('ulbp', cell_size=cell_size, step=step)
ndims = 59
descs = np.empty((0, ndims), dtype=np.float32)
img = image[44:206,62:188]
for s in range(scales):
patches = descriptor.compute(img, normalize=False, flatten=False)
descs = np.append(descs, patches, axis=0)
"""
if embed_spatial_information:
new_desc = np.empty((desc.shape[0]+2), dtype=np.float32)
new_desc[:-2] = desc
new_desc[-2:] = np.array([i/float(img.shape[0])-0.5, j/float(img.shape[0])-0.5])
desc = new_desc
"""
img = cv2.resize(img, None, fx=1/scale_factor, fy=1/scale_factor, interpolation=cv2.INTER_NEAREST)
if pca is not None:
return pca.transform(descs)
else:
return descs
class FisherVectors:
def __init__(self, cell_size=24, step=2, scales=3, embed_spatial_information=False, filename=None):
if filename:
# rebinding ``self`` would be a no-op; copy the unpickled state instead
self.__dict__.update(pickleLoad(filename).__dict__)
else:
self.cell_size = cell_size
self.step = step
self.scales = scales
self.embed_spatial_information = embed_spatial_information
def computePcaOnLocalDescriptors(self, images, n_image_samples=500, n_pca_components=None):
n_patches, n_features = computeDenseDescriptor(images[0]).shape
random_indexes = random.sample(range(len(images)), n_image_samples)
print 'Computing descriptors for PCA'
sys.stdout.flush()
pca_descs = np.empty((n_image_samples*n_patches, n_features), dtype=np.float32)
for i, image in enumerate(images[random_indexes]):
pca_descs[i*n_patches:(i+1)*n_patches] = computeDenseDescriptor(image, scales=5)
print 'Computing PCA'
sys.stdout.flush()
self.pca = PCA(n_components=n_pca_components, copy=False)
self.pca.fit(pca_descs)
print 'PCA computation done'
sys.stdout.flush()
def computeGMM(self, images, n_image_samples=None, n_threads=8):
if not n_image_samples or n_image_samples <= 0:
n_image_samples = images.shape[0]
random_indexes = range(n_image_samples)
else:
random_indexes = random.sample(range(len(images)), n_image_samples)
print 'Computing descriptors for GMM'
sys.stdout.flush()
n_patches = computeDenseDescriptor(images[0]).shape[0]
gmm_descs_filename = "cache/fisher_vectors/gmm_descs.mmap"
descriptors = parallelize(_parallelDenseDescriptorComputation, images[random_indexes], (n_image_samples*n_patches, self.pca.n_components_), np.float32, args=[n_patches, self.pca], output_file=gmm_descs_filename, n_jobs=n_threads, load_as_array=True)
print 'Computing GMM'
sys.stdout.flush()
self.gmm = GMM(n_components=512, n_threads=n_threads)
self.gmm.fit(descriptors)
print 'GMM computation done'
sys.stdout.flush()
def computeFisherVector(self, patches, improved=True):
K = self.gmm.n_components
N, d = patches.shape
vector = np.empty((2*K, d), dtype=np.float32)
soft_assignments = self.gmm.computeResponsabilities(patches)
squared_patches = patches ** 2
for k in range(K):
S_0 = soft_assignments[:,k].mean()
S_1 = (soft_assignments[:,k,np.newaxis] * patches).mean(axis=0)
S_2 = (soft_assignments[:,k,np.newaxis] * squared_patches).mean(axis=0)
vector[k] = (S_1 - self.gmm.means_[k]*S_0) / (np.sqrt(self.gmm.weights_[k] * self.gmm.covars_[k]))
vector[K+k] = (S_2 - 2*self.gmm.means_[k]*S_1 + (self.gmm.means_[k]**2-self.gmm.covars_[k]**2)*S_0) / (np.sqrt(2*self.gmm.weights_[k]) * self.gmm.covars_[k])
vector = vector.ravel()
if improved:
# Signed square-rooting
vector = np.sign(vector) * np.sqrt(np.abs(vector))
# L2 normalization
vector /= np.linalg.norm(vector)
return vector
def yaelFV(self, patches, improved=True):
K = self.gmm.n_components
N, d = patches.shape
flags = yael.GMM_FLAGS_MU | yael.GMM_FLAGS_SIGMA
v = yael.numpy_to_fvec(patches)
out = yael.fvec_new_0(2*K*d)
self.gmm.initYaelGmm()
yael.gmm_fisher(patches.shape[0], v, self.gmm.yael_gmm, flags, out)
vector = yael.fvec_to_numpy_acquire(out, 2*K*d)
if improved:
# Signed square-rooting
vector = np.sign(vector) * np.sqrt(np.abs(vector))
# L2 normalization
vector /= np.linalg.norm(vector)
return vector
def _parallelDenseDescriptorComputation(data, output, i, n_patches, pca=None, embed_spatial_information=False):
output[i*n_patches:(i+1)*n_patches] = computeDenseDescriptor(data[i], pca=pca, embed_spatial_information=embed_spatial_information)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Perform Fisher Vectors learning and computation')
parser.add_argument('command', choices=['pca_learning', 'gmm_learning'], help='command to execute')
#parser.add_argument('data', help='data to use for computations')
parser.add_argument('-i', dest='input_file', default='fisher_vector.pkl', help='previously learnt (or partially learnt) fisher vector models')
parser.add_argument('-o', dest='output_file', default='fisher_vector.pkl', help='where to write computations results')
parser.add_argument('-j', dest='n_threads', type=int, default=1, help='number of threads to use')
args = parser.parse_args()
base_path = "/rex/store1/home/tlorieul/"
training_set = loadDevData(filename=(base_path + 'lfw/peopleDevTrain.txt'), mapping_filename=(base_path + 'lfw/mapping.txt'))
data = np.load(base_path + 'lfw/lfwa.npy')
training_data = data[training_set]
if args.command == 'pca_learning':
fisher_vectors = FisherVectors(scales=5)
fisher_vectors.computePcaOnLocalDescriptors(training_data, n_pca_components=20)
pickleSave(args.output_file, fisher_vectors)
elif args.command == 'gmm_learning':
fisher_vectors = pickleLoad(args.input_file)
fisher_vectors.computeGMM(training_data, n_threads=args.n_threads)
pickleSave(args.output_file, fisher_vectors)
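# Hedged usage sketch (assumed workflow, not part of the original script): once the
# PCA and GMM models have been learnt and saved, a Fisher vector for a single image
# could be obtained roughly as
#
#   fv = pickleLoad('fisher_vector.pkl')
#   patches = computeDenseDescriptor(image, scales=fv.scales, pca=fv.pca)
#   vector = fv.computeFisherVector(patches)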
|
lgpl-3.0
|
Lawrence-Liu/scikit-learn
|
examples/datasets/plot_iris_dataset.py
|
283
|
1928
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
|
bsd-3-clause
|
roxyboy/scikit-learn
|
examples/calibration/plot_compare_calibration.py
|
241
|
5008
|
"""
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
webmasterraj/GaSiProMo
|
flask/lib/python2.7/site-packages/pandas/core/ops.py
|
2
|
41122
|
"""
Arithmetic operations for PandasObjects
This is not a public API.
"""
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
import numpy as np
import pandas as pd
from pandas import compat, lib, tslib
import pandas.index as _index
from pandas.util.decorators import Appender
import pandas.core.common as com
import pandas.computation.expressions as expressions
from pandas.core.common import(bind_method, is_list_like, notnull, isnull,
_values_from_object, _maybe_match_name)
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
def _create_methods(arith_method, radd_func, comp_method, bool_method,
use_numexpr, special=False, default_axis='columns'):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
# NOTE: Only frame cares about default_axis, specifically: special methods
# have default axis None, whereas flex methods have default axis 'columns'
# if we're not using numexpr, then don't pass a str_rep
if use_numexpr:
op = lambda x: x
else:
op = lambda x: None
if special:
def names(x):
if x[-1] == "_":
return "__%s_" % x
else:
return "__%s__" % x
else:
names = lambda x: x
radd_func = radd_func or operator.add
# In frame, all special methods have default_axis=None, flex methods have
# default_axis set to the default (columns)
new_methods = dict(
add=arith_method(operator.add, names('add'), op('+'),
default_axis=default_axis),
radd=arith_method(radd_func, names('radd'), op('+'),
default_axis=default_axis),
sub=arith_method(operator.sub, names('sub'), op('-'),
default_axis=default_axis),
mul=arith_method(operator.mul, names('mul'), op('*'),
default_axis=default_axis),
truediv=arith_method(operator.truediv, names('truediv'), op('/'),
truediv=True, fill_zeros=np.inf,
default_axis=default_axis),
floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf),
# Causes a floating point exception in the tests when numexpr
# enabled, so for now no speedup
mod=arith_method(operator.mod, names('mod'), None,
default_axis=default_axis, fill_zeros=np.nan),
pow=arith_method(operator.pow, names('pow'), op('**'),
default_axis=default_axis),
# not entirely sure why this is necessary, but previously was included
# so it's here to maintain compatibility
rmul=arith_method(operator.mul, names('rmul'), op('*'),
default_axis=default_axis, reversed=True),
rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
default_axis=default_axis, reversed=True),
rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
names('rtruediv'), op('/'), truediv=True,
fill_zeros=np.inf, default_axis=default_axis,
reversed=True),
rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
names('rfloordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf,
reversed=True),
rpow=arith_method(lambda x, y: y ** x, names('rpow'), op('**'),
default_axis=default_axis, reversed=True),
rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
default_axis=default_axis, fill_zeros=np.nan,
reversed=True),
)
new_methods['div'] = new_methods['truediv']
new_methods['rdiv'] = new_methods['rtruediv']
# Comp methods never had a default axis set
if comp_method:
new_methods.update(dict(
eq=comp_method(operator.eq, names('eq'), op('==')),
ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
lt=comp_method(operator.lt, names('lt'), op('<')),
gt=comp_method(operator.gt, names('gt'), op('>')),
le=comp_method(operator.le, names('le'), op('<=')),
ge=comp_method(operator.ge, names('ge'), op('>=')),
))
if bool_method:
new_methods.update(dict(
and_=bool_method(operator.and_, names('and_'), op('&')),
or_=bool_method(operator.or_, names('or_'), op('|')),
# For some reason ``^`` wasn't used in original.
xor=bool_method(operator.xor, names('xor'), op('^')),
rand_=bool_method(lambda x, y: operator.and_(y, x),
names('rand_'), op('&')),
ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_'), op('|')),
rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor'), op('^'))
))
new_methods = dict((names(k), v) for k, v in new_methods.items())
return new_methods
def add_methods(cls, new_methods, force, select, exclude):
if select and exclude:
raise TypeError("May only pass either select or exclude")
methods = new_methods
if select:
select = set(select)
methods = {}
for key, method in new_methods.items():
if key in select:
methods[key] = method
if exclude:
for k in exclude:
new_methods.pop(k, None)
for name, method in new_methods.items():
if force or name not in cls.__dict__:
bind_method(cls, name, method)
#----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None,
comp_method=None, bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
Parameters
----------
arith_method : function (optional)
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
radd_func : function (optional)
Possible replacement for ``operator.add`` for compatibility
comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
"""
radd_func = radd_func or operator.add
# in frame, special methods have default_axis = None, comp methods use
# 'columns'
new_methods = _create_methods(arith_method, radd_func, comp_method,
bool_method, use_numexpr, default_axis=None,
special=True)
# inplace operators (I feel like these should get passed an `inplace=True`
# or just be removed)
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(result.reindex_like(self,copy=False)._data,
verify_is_copy=False)
return self
return f
new_methods.update(dict(
__iadd__=_wrap_inplace_method(new_methods["__add__"]),
__isub__=_wrap_inplace_method(new_methods["__sub__"]),
__imul__=_wrap_inplace_method(new_methods["__mul__"]),
__itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
__ipow__=_wrap_inplace_method(new_methods["__pow__"]),
))
if not compat.PY3:
new_methods["__idiv__"] = new_methods["__div__"]
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None,
flex_comp_method=None, flex_bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
Parameters
----------
flex_arith_method : function (optional)
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
radd_func : function (optional)
Possible replacement for ``lambda x, y: operator.add(y, x)`` for
compatibility
flex_comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
"""
radd_func = radd_func or (lambda x, y: operator.add(y, x))
# in frame, default axis is 'columns', doesn't matter for series and panel
new_methods = _create_methods(
flex_arith_method, radd_func, flex_comp_method, flex_bool_method,
use_numexpr, default_axis='columns', special=False)
new_methods.update(dict(
multiply=new_methods['mul'],
subtract=new_methods['sub'],
divide=new_methods['div']
))
# opt out of bool flex methods for now
for k in ('ror_', 'rxor', 'rand_'):
if k in new_methods:
new_methods.pop(k)
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
class _TimeOp(object):
"""
Wrapper around Series datetime/time/timedelta arithmetic operations.
Generally, you should use classmethod ``maybe_convert_for_time_op`` as an
entry point.
"""
fill_value = tslib.iNaT
wrap_results = staticmethod(lambda x: x)
dtype = None
def __init__(self, left, right, name):
self.name = name
# need to make sure that we are aligning the data
if isinstance(left, pd.Series) and isinstance(right, pd.Series):
left, right = left.align(right,copy=False)
self.left = left
self.right = right
lvalues = self._convert_to_array(left, name=name)
rvalues = self._convert_to_array(right, name=name, other=lvalues)
self.is_timedelta_lhs = com.is_timedelta64_dtype(left)
self.is_datetime_lhs = com.is_datetime64_dtype(left)
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
self.is_datetime_rhs = com.is_datetime64_dtype(rvalues)
self.is_timedelta_rhs = com.is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
self._validate()
self._convert_for_datetime(lvalues, rvalues)
def _validate(self):
# timedelta and integer mul/div
if (self.is_timedelta_lhs and self.is_integer_rhs) or\
(self.is_integer_lhs and self.is_timedelta_rhs):
if self.name not in ('__truediv__', '__div__', '__mul__'):
raise TypeError("can only operate on a timedelta and an "
"integer for division, but the operator [%s]"
"was passed" % self.name)
# 2 datetimes
elif self.is_datetime_lhs and self.is_datetime_rhs:
if self.name != '__sub__':
raise TypeError("can only operate on a datetimes for"
" subtraction, but the operator [%s] was"
" passed" % self.name)
# 2 timedeltas
elif self.is_timedelta_lhs and self.is_timedelta_rhs:
if self.name not in ('__div__', '__truediv__', '__add__',
'__sub__'):
raise TypeError("can only operate on a timedeltas for "
"addition, subtraction, and division, but the"
" operator [%s] was passed" % self.name)
# datetime and timedelta
elif self.is_datetime_lhs and self.is_timedelta_rhs:
if self.name not in ('__add__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of"
" a timedelta for addition and subtraction, "
" but the operator [%s] was passed" %
self.name)
elif self.is_timedelta_lhs and self.is_datetime_rhs:
if self.name != '__add__':
raise TypeError("can only operate on a timedelta and"
" a datetime for addition, but the operator"
" [%s] was passed" % self.name)
else:
raise TypeError('cannot operate on a series without a rhs '
'of a series/ndarray of type datetime64[ns] '
'or a timedelta')
def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import to_timedelta
coerce = True
if not is_list_like(values):
values = np.array([values])
inferred_type = lib.infer_dtype(values)
if inferred_type in ('datetime64', 'datetime', 'date', 'time'):
# if we have an `other` of timedelta, but use pd.NaT here,
# we are in the wrong path
if (other is not None and other.dtype == 'timedelta64[ns]' and
all(isnull(v) for v in values)):
values = np.empty(values.shape, dtype=other.dtype)
values[:] = tslib.iNaT
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
elif not (isinstance(values, (np.ndarray, pd.Series)) and
com.is_datetime64_dtype(values)):
values = tslib.array_to_datetime(values)
elif inferred_type in ('timedelta', 'timedelta64'):
# have a timedelta, convert to ns here
values = to_timedelta(values, coerce=coerce)
elif inferred_type == 'integer':
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == 'm':
values = values.astype('timedelta64[ns]')
elif isinstance(values, pd.PeriodIndex):
values = values.to_timestamp().to_series()
elif name not in ('__truediv__', '__div__', '__mul__'):
raise TypeError("incompatible type for a datetime/timedelta "
"operation [{0}]".format(name))
elif isinstance(values[0], pd.DateOffset):
# handle DateOffsets
os = np.array([getattr(v, 'delta', None) for v in values])
mask = isnull(os)
if mask.any():
raise TypeError("cannot use a non-absolute DateOffset in "
"datetime/timedelta operations [{0}]".format(
', '.join([com.pprint_thing(v)
for v in values[mask]])))
values = to_timedelta(os, coerce=coerce)
elif inferred_type == 'floating':
# all nan, so ok, use the other dtype (e.g. timedelta or datetime)
if isnull(values).all():
values = np.empty(values.shape, dtype=other.dtype)
values[:] = tslib.iNaT
else:
raise TypeError(
'incompatible type [{0}] for a datetime/timedelta '
'operation'.format(np.array(values).dtype))
else:
raise TypeError("incompatible type [{0}] for a datetime/timedelta"
" operation".format(np.array(values).dtype))
return values
def _convert_for_datetime(self, lvalues, rvalues):
mask = None
# datetimes require views
if self.is_datetime_lhs or self.is_datetime_rhs:
# datetime subtraction means timedelta
if self.is_datetime_lhs and self.is_datetime_rhs:
self.dtype = 'timedelta64[ns]'
else:
self.dtype = 'datetime64[ns]'
mask = isnull(lvalues) | isnull(rvalues)
lvalues = lvalues.view(np.int64)
rvalues = rvalues.view(np.int64)
# otherwise it's a timedelta
else:
self.dtype = 'timedelta64[ns]'
mask = isnull(lvalues) | isnull(rvalues)
lvalues = lvalues.astype(np.int64)
rvalues = rvalues.astype(np.int64)
# time delta division -> unit less
# integer gets converted to timedelta in np < 1.6
if (self.is_timedelta_lhs and self.is_timedelta_rhs) and\
not self.is_integer_rhs and\
not self.is_integer_lhs and\
self.name in ('__div__', '__truediv__'):
self.dtype = 'float64'
self.fill_value = np.nan
lvalues = lvalues.astype(np.float64)
rvalues = rvalues.astype(np.float64)
# if we need to mask the results
if mask is not None:
if mask.any():
def f(x):
x = np.array(x, dtype=self.dtype)
np.putmask(x, mask, self.fill_value)
return x
self.wrap_results = f
self.lvalues = lvalues
self.rvalues = rvalues
@classmethod
def maybe_convert_for_time_op(cls, left, right, name):
"""
if ``left`` and ``right`` are appropriate for datetime arithmetic with
operation ``name``, processes them and returns a ``_TimeOp`` object
that stores all the required values. Otherwise, it will generate
either a ``NotImplementedError`` or ``None``, indicating that the
operation is unsupported for datetimes (e.g., an unsupported r_op) or
that the data is not the right type for time ops.
"""
# decide if we can do it
is_timedelta_lhs = com.is_timedelta64_dtype(left)
is_datetime_lhs = com.is_datetime64_dtype(left)
if not (is_datetime_lhs or is_timedelta_lhs):
return None
# rops are allowed. No need for special checks, just strip off
# r part.
if name.startswith('__r'):
name = "__" + name[3:]
return cls(left, right, name)
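# Hedged illustration (public pandas behaviour that the _TimeOp machinery above
# supports; not part of the original module):
#
#   s = pd.Series(pd.to_datetime(['2014-01-01', '2014-01-03']))
#   s - s.shift()               # datetime - datetime  -> timedelta64[ns]
#   s + pd.Timedelta('1 days')  # datetime + timedelta -> datetime64[ns]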
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None,
default_axis=None, **eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
if isinstance(y, (np.ndarray, pd.Series, pd.Index)):
dtype = np.find_common_type([x.dtype, y.dtype], [])
result = np.empty(x.size, dtype=dtype)
mask = notnull(x) & notnull(y)
result[mask] = op(x[mask], _values_from_object(y[mask]))
elif isinstance(x, np.ndarray):
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
else:
raise TypeError("{typ} cannot perform the operation {op}".format(typ=type(x).__name__,op=str_rep))
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
def wrapper(left, right, name=name):
if isinstance(right, pd.DataFrame):
return NotImplemented
time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name)
if time_converted is None:
lvalues, rvalues = left, right
dtype = None
wrap_results = lambda x: x
elif time_converted == NotImplemented:
return NotImplemented
else:
left, right = time_converted.left, time_converted.right
lvalues, rvalues = time_converted.lvalues, time_converted.rvalues
dtype = time_converted.dtype
wrap_results = time_converted.wrap_results
if isinstance(rvalues, pd.Series):
rindex = getattr(rvalues,'index',rvalues)
name = _maybe_match_name(left, rvalues)
lvalues = getattr(lvalues, 'values', lvalues)
rvalues = getattr(rvalues, 'values', rvalues)
if left.index.equals(rindex):
index = left.index
else:
index, lidx, ridx = left.index.join(rindex, how='outer',
return_indexers=True)
if lidx is not None:
lvalues = com.take_1d(lvalues, lidx)
if ridx is not None:
rvalues = com.take_1d(rvalues, ridx)
arr = na_op(lvalues, rvalues)
return left._constructor(wrap_results(arr), index=index,
name=name, dtype=dtype)
else:
# scalars
if hasattr(lvalues, 'values'):
lvalues = lvalues.values
return left._constructor(wrap_results(na_op(lvalues, rvalues)),
index=left.index, name=left.name,
dtype=dtype)
return wrapper
def _comp_method_SERIES(op, name, str_rep, masker=False):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
# dispatch to the categorical if we have a categorical
# in either operand
if com.is_categorical_dtype(x):
return op(x,y)
elif com.is_categorical_dtype(y) and not lib.isscalar(y):
return op(y,x)
if x.dtype == np.object_:
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
if y.dtype != np.object_:
result = lib.vec_compare(x, y.astype(np.object_), op)
else:
result = lib.vec_compare(x, y, op)
else:
result = lib.scalar_compare(x, y, op)
else:
try:
result = getattr(x, name)(y)
if result is NotImplemented:
raise TypeError("invalid type comparison")
except (AttributeError):
result = op(x, y)
return result
def wrapper(self, other):
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
if len(self) != len(other):
raise ValueError('Series lengths must match to compare')
return self._constructor(na_op(self.values, other.values),
index=self.index, name=name)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
if len(self) != len(other):
raise ValueError('Lengths must match to compare')
return self._constructor(na_op(self.values, np.asarray(other)),
index=self.index).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not com.is_categorical_dtype(self):
msg = "Cannot compare a Categorical for op {op} with Series of dtype {typ}.\n"\
"If you want to compare values, use 'series <op> np.asarray(other)'."
raise TypeError(msg.format(op=op,typ=self.dtype))
mask = isnull(self)
values = self.get_values()
other = _index.convert_scalar(values,_values_from_object(other))
if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
values = values.view('i8')
# scalars
res = na_op(values, other)
if np.isscalar(res):
raise TypeError('Could not compare %s type with Series'
% type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name,
dtype='bool')
# mask out the invalids
if mask.any():
res[mask] = masker
return res
return wrapper
def _bool_method_SERIES(op, name, str_rep):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
if (x.dtype == np.bool_ and
y.dtype == np.bool_): # pragma: no cover
result = op(x, y) # when would this be hit?
else:
x = com._ensure_object(x)
y = com._ensure_object(y)
result = lib.vec_binop(x, y, op)
else:
try:
# let null fall thru
if not isnull(y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
raise TypeError("cannot compare a dtyped [{0}] array with "
"a scalar of type [{1}]".format(
x.dtype, type(y).__name__))
return result
def wrapper(self, other):
is_self_int_dtype = com.is_integer_dtype(self.dtype)
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
other = other.reindex_like(self)
is_other_int_dtype = com.is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
return filler(self._constructor(na_op(self.values, other.values),
index=self.index,
name=name))
elif isinstance(other, pd.DataFrame):
return NotImplemented
else:
# scalars, list, tuple, np.array
filler = fill_int if is_self_int_dtype and com.is_integer_dtype(np.asarray(other)) else fill_bool
return filler(self._constructor(na_op(self.values, other),
index=self.index)).__finalize__(self)
return wrapper
def _radd_compat(left, right):
radd = lambda x, y: y + x
# GH #353, NumPy 1.5.1 workaround
try:
output = radd(left, right)
except TypeError:
raise
return output
def _flex_method_SERIES(op, name, str_rep, default_axis=None,
fill_zeros=None, **eval_kwargs):
doc = """
Binary operator %s with support to substitute a fill_value for missing data
in one of the inputs
Parameters
----------
other: Series or scalar value
fill_value : None or float value, default None (NaN)
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
result : Series
""" % name
@Appender(doc)
def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# validate axis
self._get_axis_number(axis)
if isinstance(other, pd.Series):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, pd.Series, list, tuple)):
if len(other) != len(self):
raise ValueError('Lengths must be equal')
return self._binop(self._constructor(other, self.index), op,
level=level, fill_value=fill_value)
else:
return self._constructor(op(self.values, other),
self.index).__finalize__(self)
flex_wrapper.__name__ = name
return flex_wrapper
series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES,
radd_func=_radd_compat,
flex_comp_method=_comp_method_SERIES)
series_special_funcs = dict(arith_method=_arith_method_SERIES,
radd_func=_radd_compat,
comp_method=_comp_method_SERIES,
bool_method=_bool_method_SERIES)
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
"""
def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(
op, str_rep, x, y, raise_on_error=True, **eval_kwargs)
except TypeError:
xrav = x.ravel()
if isinstance(y, (np.ndarray, pd.Series)):
dtype = np.find_common_type([x.dtype, y.dtype], [])
result = np.empty(x.size, dtype=dtype)
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
xrav = xrav[mask]
yrav = yrav[mask]
if np.prod(xrav.shape) and np.prod(yrav.shape):
result[mask] = op(xrav, yrav)
elif hasattr(x,'size'):
result = np.empty(x.size, dtype=x.dtype)
mask = notnull(xrav)
xrav = xrav[mask]
if np.prod(xrav.shape):
result[mask] = op(xrav, y)
else:
raise TypeError("cannot perform operation {op} between objects "
"of type {x} and {y}".format(op=name,x=type(x),y=type(y)))
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
@Appender(_arith_doc_FRAME % name)
def f(self, other, axis=default_axis, level=None, fill_value=None):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._combine_frame(other, na_op, fill_value, level)
elif isinstance(other, pd.Series):
return self._combine_series(other, na_op, fill_value, axis, level)
elif isinstance(other, (list, tuple)):
if axis is not None and self._get_axis_name(axis) == 'index':
# TODO: Get all of these to use _constructor_sliced
# casted = self._constructor_sliced(other, index=self.index)
casted = pd.Series(other, index=self.index)
else:
# casted = self._constructor_sliced(other, index=self.columns)
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, fill_value, axis, level)
elif isinstance(other, np.ndarray) and other.ndim: # skips np scalar
if other.ndim == 1:
if axis is not None and self._get_axis_name(axis) == 'index':
# casted = self._constructor_sliced(other,
# index=self.index)
casted = pd.Series(other, index=self.index)
else:
# casted = self._constructor_sliced(other,
# index=self.columns)
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, fill_value,
axis, level)
elif other.ndim == 2:
# casted = self._constructor(other, index=self.index,
# columns=self.columns)
casted = pd.DataFrame(other, index=self.index,
columns=self.columns)
return self._combine_frame(casted, na_op, fill_value, level)
else:
raise ValueError("Incompatible argument shape: %s" %
(other.shape, ))
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
# Masker unused for now
def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',
masker=False):
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=x.dtype)
if isinstance(y, (np.ndarray, pd.Series)):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for flexible comparison methods %s' % name)
def f(self, other, axis=default_axis, level=None):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._flex_compare_frame(other, na_op, str_rep, level)
elif isinstance(other, pd.Series):
return self._combine_series(other, na_op, None, axis, level)
elif isinstance(other, (list, tuple)):
if axis is not None and self._get_axis_name(axis) == 'index':
casted = pd.Series(other, index=self.index)
else:
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, None, axis, level)
elif isinstance(other, np.ndarray):
if other.ndim == 1:
if axis is not None and self._get_axis_name(axis) == 'index':
casted = pd.Series(other, index=self.index)
else:
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, None, axis, level)
elif other.ndim == 2:
casted = pd.DataFrame(other, index=self.index,
columns=self.columns)
return self._flex_compare_frame(casted, na_op, str_rep, level)
else:
raise ValueError("Incompatible argument shape: %s" %
(other.shape, ))
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
def _comp_method_FRAME(func, name, str_rep, masker=False):
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._compare_frame(other, func, str_rep)
elif isinstance(other, pd.Series):
return self._combine_series_infer(other, func)
else:
# straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
res = self._combine_const(other, func, raise_on_error=False)
return res.fillna(True).astype(bool)
f.__name__ = name
return f
frame_flex_funcs = dict(flex_arith_method=_arith_method_FRAME,
radd_func=_radd_compat,
flex_comp_method=_flex_comp_method_FRAME)
frame_special_funcs = dict(arith_method=_arith_method_FRAME,
radd_func=_radd_compat,
comp_method=_comp_method_FRAME,
bool_method=_arith_method_FRAME)
def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,
default_axis=None, **eval_kwargs):
# copied from Series na_op above, but without unnecessary branch for
# non-scalar
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
# TODO: might need to find_common_type here?
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
# work only for scalars
def f(self, other):
if not np.isscalar(other):
raise ValueError('Simple arithmetic with %s can only be '
'done with scalar values' %
self._constructor.__name__)
return self._combine(other, op)
f.__name__ = name
return f
def _comp_method_PANEL(op, name, str_rep=None, masker=False):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, np.ndarray):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, self._constructor):
return self._compare_constructor(other, na_op)
elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
pd.Series)):
raise Exception("input needs alignment for this object [%s]" %
self._constructor)
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
panel_special_funcs = dict(arith_method=_arith_method_PANEL,
comp_method=_comp_method_PANEL,
bool_method=_arith_method_PANEL)
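# Hedged illustration (assumed call sites in pandas.core.series/frame/panel, not
# part of this module): the factory dictionaries above are unpacked into the
# binder functions defined earlier, roughly like:
#
#   add_flex_arithmetic_methods(Series, **series_flex_funcs)
#   add_special_arithmetic_methods(Series, **series_special_funcs)
#   add_flex_arithmetic_methods(DataFrame, **frame_flex_funcs)
#   add_special_arithmetic_methods(DataFrame, **frame_special_funcs)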
|
gpl-2.0
|
Jwely/pivpr
|
py/config.py
|
1
|
2620
|
__author__ = 'Jwely'
from os.path import join, dirname, abspath
from matplotlib import cm
from matplotlib import rcParams
config_root = dirname(abspath(__file__))
# this prevents plt.tight_layout() from crowding axis labels off the edges of the plot.
rcParams.update({'figure.autolayout': True})
# aerodynamic params to assume
AIR_DENSITY = 1.225 # kg/m^3
AIR_KINEMATIC_VISCOSITY = 15.68e-6 # m^2 / s
AIR_DYNAMIC_VISCOSITY = 18.46e-6 # kg / m*s
# resource filepaths and directories
EXPERIMENT_TABLE_PATH = abspath(join(config_root, "piv/dat/experiment_table.csv"))
PIV_PICKLE_DIR = abspath(join(config_root, "piv/pickles"))
DATA_FULL_DIR = abspath(join(config_root, "../data_full"))
CALIBRATION_DIR = abspath(join(config_root, "uncertainty/cal_data"))
SYNTHESIZED_PIV_DIR = abspath(join(config_root, "uncertainty/artificial_images"))
TEX_FIGURE_DIR = abspath(join(config_root, "../texdocs/figs"))
TEX_TABLE_DIR = abspath(join(config_root, "../texdocs/tables"))
TEX_MAIN_PATH = abspath(join(config_root, "../texdocs/main.tex"))
# when pickling, save only these dynamic components (saves disk space), discard the others
DYNAMIC_INCLUDES = ['U', 'V', 'ctke', 'r', 't', 'w', 'rt', 'rw', 'tw']
# statistics variables
DEFAULT_MIN_POINTS = 15 # minimum number of points required to consider a data point as good
# global plotting variables
CONTOUR_DEFAULT_LEVELS = 256 # number of discrete colors to use on contour plots
CONTOUR_DEFAULT_CMAP = cm.jet # default color ramp to use on contour plots
CONTOUR_DIVERGE_CMAP = cm.PRGn # default color ramp for diverging contour plots (things centered about zero)
CONTOUR_DEFAULT_RRANGE = (0, 50) # default radius range to subset contour plots by
SCATTER_DEFAULT_CMAP = cm.jet # default colormap to use on scatter plots with third variable
SCATTER_DEFAULT_COLOR = 'navy' # default color used on scatter plots with just one variable
DEFAULT_CORE_MARKER_COLOR = 'k' # default color of lines and crosshair used to mark the core boundary
SCATTER_DEFAULT_MARKER = 'x' # marker to use on scatter plots
SCATTER_DEFAULT_RLIM = 100 # default radial axis limit for scatter plots
VRANGE_DEFAULT = (1, 99) # default percentile values for defining color ramp boundaries
SCATTER_VRANGE_DFAULT = (5, 95) # default percentile value for defining scatter plot y axis
DEFAULT_DPI = 300 # default dots per inch
DEFAULT_TITLE_SIZE = 18 # font size for titles
# attributes of the dataset
SAMPLING_RATE = 1 # the sampling rate in Hz. (1/time between vector fields)
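# Hedged usage sketch (assumed downstream code, not part of this file): plotting
# and I/O modules are expected to import these constants rather than hard-code
# values, e.g.
#
#   from config import CONTOUR_DEFAULT_CMAP, DEFAULT_DPI, TEX_FIGURE_DIR
#   fig.savefig(join(TEX_FIGURE_DIR, "some_figure.png"), dpi=DEFAULT_DPI)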
|
mit
|
djgagne/scikit-learn
|
sklearn/cluster/dbscan_.py
|
92
|
12380
|
# -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i. While true, it's useless information)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
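# Minimal usage sketch (standard scikit-learn API; added for illustration, not part
# of the original module):
if __name__ == "__main__":
    X = np.array([[1., 2.], [2., 2.], [2., 3.],
                  [8., 7.], [8., 8.], [25., 80.]])
    db = DBSCAN(eps=3, min_samples=2).fit(X)
    print(db.labels_)                 # noisy samples get the label -1
    print(db.core_sample_indices_)    # indices of the core samples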
|
bsd-3-clause
|
quantopian/odo
|
odo/backends/dask.py
|
4
|
2863
|
from __future__ import absolute_import, division, print_function
from collections import Iterator
import numpy as np
import pandas as pd
from datashape.dispatch import dispatch
from datashape import from_numpy, var
from dask.array.core import Array, from_array
from dask.bag.core import Bag
import dask.bag as db
from dask.compatibility import long
import dask.dataframe as dd
from odo import append, chunks, convert, discover, TextFile
from ..utils import filter_kwargs
@discover.register(Array)
def discover_dask_array(a, **kwargs):
return from_numpy(a.shape, a.dtype)
@discover.register(dd.Series)
@discover.register(dd.DataFrame)
def discover_dask_dataframe(df):
return var * discover(df.head()).measure
arrays = [np.ndarray]
try:
import h5py
except ImportError:
pass
else:
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
try:
import bcolz
except ImportError:
pass
else:
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
@convert.register(Array, tuple(arrays), cost=1.)
def array_to_dask(x, name=None, chunks=None, **kwargs):
if chunks is None:
raise ValueError("chunks cannot be None")
return from_array(x, chunks=chunks, name=name,
**filter_kwargs(from_array, kwargs))
@convert.register(np.ndarray, Array, cost=10.)
def dask_to_numpy(x, **kwargs):
return np.array(x)
@convert.register(pd.DataFrame, dd.DataFrame, cost=200)
@convert.register(pd.Series, dd.Series, cost=200)
@convert.register(float, Array, cost=200)
def dask_to_other(x, **kwargs):
return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
if not inplace:
# Resize output dataset to accept new data
assert out.shape[1:] == arr.shape[1:]
resize(out, out.shape[0] + arr.shape[0]) # elongate
arr.store(out)
return out
@convert.register(Iterator, Bag)
def bag_to_iterator(x, **kwargs):
return iter(x)
@convert.register(Bag, chunks(TextFile))
def bag_to_iterator(x, **kwargs):
return db.read_text([tf.path for tf in x])
@convert.register(Bag, list)
def bag_to_iterator(x, **kwargs):
return db.from_sequence(x, **filter_kwargs(db.from_sequence, kwargs))
@convert.register(dd.DataFrame, pd.DataFrame, cost=1.)
def pandas_dataframe_to_dask_dataframe(x, npartitions=None, **kwargs):
if npartitions is None:
raise ValueError("npartitions cannot be None")
return dd.from_pandas(x, npartitions=npartitions,
**filter_kwargs(dd.from_pandas, kwargs))
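# A minimal usage sketch of the conversions registered above (illustrative
# only; the sample data, partition count and chunk size are assumptions):
#
#     import numpy as np
#     import pandas as pd
#     from odo import convert
#     df = pd.DataFrame({'x': range(10)})
#     ddf = convert(dd.DataFrame, df, npartitions=2)     # npartitions is required here
#     back = convert(pd.DataFrame, ddf)                   # dask -> pandas via .compute()
#     darr = convert(Array, np.arange(12), chunks=(4,))   # chunks is required here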
|
bsd-3-clause
|
iLampard/alphaware
|
alphaware/examples/utils_funcs.py
|
1
|
1380
|
# -*- coding: utf-8 -*-
from alphaware.utils import (get_tiaocang_date,
ensure_noncumul_return,
ensure_cumul_return)
from alphaware.enums import FreqType
from alphaware.const import RETURN
print(get_tiaocang_date(start_date='2016-01-01', end_date='2016-3-31', freq=FreqType.EOM))
# [datetime.datetime(2016, 1, 29, 0, 0), datetime.datetime(2016, 2, 29, 0, 0), datetime.datetime(2016, 3, 31, 0, 0)]
print(get_tiaocang_date(start_date='2017/1/1', end_date='2017/2/1', freq=FreqType.EOW, date_format='%Y/%m/%d'))
# [datetime.datetime(2017, 1, 6, 0, 0), datetime.datetime(2017, 1, 13, 0, 0), datetime.datetime(2017, 1, 20, 0, 0),
# datetime.datetime(2017, 1, 26, 0, 0)]
import pandas as pd
from argcheck import preprocess
from alphaware.utils import (ensure_noncumul_return,
ensure_cumul_return)
from alphaware.const import RETURN
from alphaware.enums import ReturnType
# Define a cumulative return series
ret = RETURN(data=pd.Series([1.0, 1.0, 1.05, 1.1],
index=['2010-01-02', '2010-01-03', '2010-01-04', '2010-01-05']), type=ReturnType.Cumul)
# To consume a non-cumulative (e.g. daily-frequency) return series, simply combine argcheck.preprocess with ensure_noncumul_return
@preprocess(data=ensure_noncumul_return)
def mean_return(data):
return data.mean()
print (mean_return(ret))
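# By symmetry -- assuming ensure_cumul_return converts a non-cumulative RETURN
# into its cumulative form before the call (an assumption, not verified here) --
# a consumer of cumulative series could be sketched as:
@preprocess(data=ensure_cumul_return)
def last_cumul_return(data):
    return data.iloc[-1]
print(last_cumul_return(ret))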
|
apache-2.0
|
djhshih/genomic
|
utils/genompy/genompy/plot/cn.py
|
1
|
6009
|
#!/usr/bin/env python3
import numpy as np
import matplotlib
import matplotlib.lines as lines
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
from .. import cn
def plot_sample_profile(x, y, seg_x=None, seg_y=None, subplot_spec=None, ax=None, hide_xaxis=False, yref=0, downsample=1):
ax = plt.subplot(subplot_spec, sharex=ax)
# plot reference line
ax.axhline(yref, color='#cccccc', lw=8)
if downsample > 1:
marker = '.'
else:
marker = 'o'
# plot data points
ax.plot(x[::downsample], y[::downsample], 'k', marker=marker, ls='')
# plot segments
if seg_x is not None and seg_y is not None:
for i in range(len(seg_y)):
# FIXME expose colour thresholds as parameters
if seg_y[i] > 0.2:
color = '#bd0026'
elif seg_y[i] < -0.2:
color = '#0868ac'
else:
color = '#666666'
line = lines.Line2D(seg_x[i, :], [seg_y[i], seg_y[i]], color=color, lw=2)
ax.add_line(line)
ax.yaxis.tick_left()
if hide_xaxis:
for side in ('right', 'top', 'bottom'):
ax.spines[side].set_color('none')
ax.xaxis.set(visible=False)
return ax
def coordinate_kbp(x, pos):
return '{} kbp'.format(x/1e3)
def coordinate_mbp(x, pos):
return '{} Mbp'.format(x/1e6)
def draw_xaxis(subplot_spec, ax):
ax2 = plt.subplot(subplot_spec, sharex=ax)
ax2.spines['right'].set_color('none')
ax2.spines['left'].set_color('none')
ax2.spines['top'].set_color('none')
ax2.set(yticks=[])
ax2.xaxis.tick_bottom()
def plot_mrna(gene_region, ax, top):
h = 0.05
# draw intron
line = lines.Line2D([gene_region.start, gene_region.end], [top - h/2, top - h/2], color='#fd8d3c', lw=2, zorder=0)
ax.add_line(line)
# draw exons
for exon in gene_region.exons:
s = exon.start
e = exon.end
x = [s, e, e, s]
y = [top-h, top-h, top, top]
ax.fill(x, y, '#fd8d3c', lw=2, ec='#fd8d3c')
# draw coding regions
for cds in gene_region.coding_exons:
s = cds.start
e = cds.end
x = [s, e, e, s]
y = [top-h, top-h, top, top]
ax.fill(x, y, '#f03b20', lw=2, ec='#f03b20')
# annotate gene
size = gene_region.end - gene_region.start + 1
mid = gene_region.start + size/2
ax.text(mid, -0.3, gene_region.name, horizontalalignment='right', clip_on=True, rotation=30)
def plot_mrnas(genes, subplot_spec, ax):
ax = plt.subplot(subplot_spec, sharex=ax)
#top = -1
for g in genes:
if g.strand == '+':
#if top < 0:
top = 0
else:
top = -0.1
plot_mrna(g, ax, top)
# remove ticks and boxes
ax.xaxis.set(visible=False)
ax.yaxis.set(visible=False)
ax.set_frame_on(False)
# set axis limits
ax.set_ylim(bottom=-0.8, top=0)
return ax
def get_gene_regions(genomicRegion):
genes = []
for g in genomicRegion.genes:
genes.append(cn.GeneRegion(g, genomicRegion.geneDb))
# sort genes by starting position
genes.sort(key=lambda g: g.start)
return genes
from matplotlib.transforms import Bbox, TransformedBbox, blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector, BboxConnectorPatch
def connect_bboxes(bbox1, bbox2, \
loc1a, loc2a, loc1b, loc2b, \
prop_lines, prop_patches=None):
if prop_patches is None:
prop_patches = prop_lines.copy()
prop_patches['alpha'] = prop_patches.get('alpha', 1)*0.1
c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
c1.set_clip_on(False)
c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
c2.set_clip_on(False)
bbox_patch1 = BboxPatch(bbox1, **prop_patches)
bbox_patch2 = BboxPatch(bbox2, **prop_patches)
p = BboxConnectorPatch(bbox1, bbox2, loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b, **prop_patches)
p.set_clip_on(False)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect(ax1, ax2, xlim, **kwargs):
trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
bbox = Bbox.from_extents(xlim[0], 0, xlim[1], 1)
tbbox1 = TransformedBbox(bbox, trans1)
tbbox2 = TransformedBbox(bbox, trans2)
prop_patches = kwargs.copy()
prop_patches['ec'] = 'none'
prop_patches['alpha'] = 0.1
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bboxes(tbbox1, tbbox2, loc1a=3, loc2a=2, loc1b=4, loc2b=1, prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
def plot_locus(x, y, seg_x=None, seg_y=None, xlim=None, genes=None, yref=0, downsample=(1,1)):
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 0.75, 1])
fig = plt.figure()
fig.patch.set(facecolor='w')
main_ax = plot_sample_profile(x, y, seg_x, seg_y, gs[0, :], yref=yref, downsample=downsample[0])
main_ax.xaxis.set_major_formatter(ticker.FuncFormatter(coordinate_mbp))
# zoomed-in profile
ax = plot_sample_profile(x, y, seg_x, seg_y, gs[1, :], yref=yref, downsample=downsample[1])
main_ax.set_xlim(left=x[0], right=x[-1])
ax.grid(True)
genes_ax = None
if genes is not None:
genes_ax = plot_mrnas(genes, gs[2, :], ax)
if xlim is None:
xlim = x[0], x[-1]
win_size = xlim[1] - xlim[0] + 1
ax.set_xlim(left=xlim[0] - win_size*0.05, right=xlim[1] + win_size*0.05)
ax.xaxis.set_major_formatter(ticker.FuncFormatter(coordinate_mbp))
zoom_effect(main_ax, ax, xlim, lw=0, color='green')
return main_ax, ax, genes_ax
def main():
geneDatabase = cn.GeneDatabase('refGene.db')
gregion = cn.GenomicRegion('chr5:121000000-122000000', geneDatabase=geneDatabase)
unit = 1e4
x = np.arange(gregion.start, gregion.end+unit, unit)
y = np.hstack( [np.random.randn(21)+1, np.random.randn(30)-1, np.random.randn(50)+2] )
seg_x = np.matrix([ [x[0], x[20]], [x[20], x[50]], [x[50], x[-1]] ])
seg_y = np.array([1, -1, 2])
genes = get_gene_regions(gregion)
xlim = (121110000, 121510000)
main_ax, ax, genes_ax = plot_locus(x, y, seg_x, seg_y, xlim, genes)
main_ax.set(ylabel='DNA copy-number')
plt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
chen0031/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/_cm.py
|
70
|
375423
|
"""
Color data and pre-defined cmap objects.
This is a helper for cm.py, originally part of that file.
Separating the data (this file) from cm.py makes both easier
to deal with.
Objects visible in cm.py are the individual cmap objects ('autumn',
etc.) and a dictionary, 'datad', including all of these objects.
"""
import matplotlib as mpl
import matplotlib.colors as colors
LUTSIZE = mpl.rcParams['image.lut']
_binary_data = {
'red' : ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (1., 0., 0.))
}
_bone_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 1.0, 1.0))}
_autumn_data = {'red': ((0., 1.0, 1.0),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),(0.746032, 0.652778, 0.652778),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.365079, 0.444444, 0.444444),(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),(0.809524, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),(1.0, 0.4975, 0.4975))}
_flag_data = {'red': ((0., 1., 1.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 1.000000, 1.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 1.000000, 1.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 1.000000, 1.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 1.000000, 1.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 1.000000, 1.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 0.000000, 0.000000),(1.0, 0., 0.)),
'green': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 1.000000, 1.000000),(0.095238, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.190476, 0.000000, 0.000000),
(0.206349, 1.000000, 1.000000),(0.222222, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 0.000000, 0.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 1.000000, 1.000000),(0.476190, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.571429, 0.000000, 0.000000),
(0.587302, 1.000000, 1.000000),(0.603175, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 0.000000, 0.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.952381, 0.000000, 0.000000),
(0.968254, 1.000000, 1.000000),(0.984127, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 1.000000, 1.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 0.000000, 0.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 1.000000, 1.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 1.000000, 1.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 1.000000, 1.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 0.000000, 0.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 1.000000, 1.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 0.000000, 0.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 1.000000, 1.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 1.000000, 1.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 1.000000, 1.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 0.000000, 0.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 1.000000, 1.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 0.000000, 0.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 1.000000, 1.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 1.000000, 1.000000),(1.0, 0., 0.))}
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),(0.365079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.746032, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178),(0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167),(1.0, 1.0, 1.0))}
_prism_data = {'red': ((0., 1., 1.),(0.031746, 1.000000, 1.000000),
(0.047619, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 0.666667, 0.666667),(0.095238, 1.000000, 1.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.666667, 0.666667),
(0.190476, 1.000000, 1.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 0.000000, 0.000000),(0.253968, 0.000000, 0.000000),
(0.269841, 0.666667, 0.666667),(0.285714, 1.000000, 1.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.666667, 0.666667),
(0.380952, 1.000000, 1.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 0.666667, 0.666667),(0.476190, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.666667, 0.666667),
(0.571429, 1.000000, 1.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 0.000000, 0.000000),(0.634921, 0.000000, 0.000000),
(0.650794, 0.666667, 0.666667),(0.666667, 1.000000, 1.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.666667, 0.666667),
(0.761905, 1.000000, 1.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 0.666667, 0.666667),(0.857143, 1.000000, 1.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.666667, 0.666667),
(0.952381, 1.000000, 1.000000),(0.984127, 1.000000, 1.000000),
(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(0.031746, 1.000000, 1.000000),
(0.047619, 1.000000, 1.000000),(0.063492, 0.000000, 0.000000),
(0.095238, 0.000000, 0.000000),(0.126984, 1.000000, 1.000000),
(0.142857, 1.000000, 1.000000),(0.158730, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 1.000000, 1.000000),(0.253968, 0.000000, 0.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 1.000000, 1.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 1.000000, 1.000000),(0.444444, 0.000000, 0.000000),
(0.476190, 0.000000, 0.000000),(0.507937, 1.000000, 1.000000),
(0.523810, 1.000000, 1.000000),(0.539683, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 1.000000, 1.000000),(0.634921, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 1.000000, 1.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 1.000000, 1.000000),(0.825397, 0.000000, 0.000000),
(0.857143, 0.000000, 0.000000),(0.888889, 1.000000, 1.000000),
(0.904762, 1.000000, 1.000000),(0.920635, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.984127, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 1.000000, 1.000000),
(0.190476, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 1.000000, 1.000000),
(0.380952, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 1.000000, 1.000000),
(0.571429, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 1.000000, 1.000000),
(0.761905, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 1.000000, 1.000000),
(0.952381, 0.000000, 0.000000),(1.0, 0.0, 0.0))}
_spring_data = {'red': ((0., 1., 1.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5),(1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4),(1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.),(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.5, 0.5))}
_spectral_data = {'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)]}
autumn = colors.LinearSegmentedColormap('autumn', _autumn_data, LUTSIZE)
bone = colors.LinearSegmentedColormap('bone ', _bone_data, LUTSIZE)
binary = colors.LinearSegmentedColormap('binary ', _binary_data, LUTSIZE)
cool = colors.LinearSegmentedColormap('cool', _cool_data, LUTSIZE)
copper = colors.LinearSegmentedColormap('copper', _copper_data, LUTSIZE)
flag = colors.LinearSegmentedColormap('flag', _flag_data, LUTSIZE)
gray = colors.LinearSegmentedColormap('gray', _gray_data, LUTSIZE)
hot = colors.LinearSegmentedColormap('hot', _hot_data, LUTSIZE)
hsv = colors.LinearSegmentedColormap('hsv', _hsv_data, LUTSIZE)
jet = colors.LinearSegmentedColormap('jet', _jet_data, LUTSIZE)
pink = colors.LinearSegmentedColormap('pink', _pink_data, LUTSIZE)
prism = colors.LinearSegmentedColormap('prism', _prism_data, LUTSIZE)
spring = colors.LinearSegmentedColormap('spring', _spring_data, LUTSIZE)
summer = colors.LinearSegmentedColormap('summer', _summer_data, LUTSIZE)
winter = colors.LinearSegmentedColormap('winter', _winter_data, LUTSIZE)
spectral = colors.LinearSegmentedColormap('spectral', _spectral_data, LUTSIZE)
datad = {
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'cool': _cool_data,
'copper': _copper_data,
'flag': _flag_data,
'gray' : _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet' : _jet_data,
'pink': _pink_data,
'prism': _prism_data,
'spring': _spring_data,
'summer': _summer_data,
'winter': _winter_data,
'spectral': _spectral_data
}
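# A minimal sketch (illustrative only, not part of the original module) of how
# an entry in ``datad`` becomes a usable colormap, mirroring the module-level
# objects defined above:
#
#     demo = colors.LinearSegmentedColormap('demo_autumn', datad['autumn'], LUTSIZE)
#     r, g, b, a = demo(0.5)   # sample the midpoint colour as an RGBA tuple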
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612,
0.15294118225574493, 0.15294118225574493),
(0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896,
0.3803921639919281, 0.3803921639919281),
(0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438,
0.74901962280273438), (0.60000002384185791,
0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994,
0.91372549533843994), (0.80000001192092896,
0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435,
0.70588237047195435), (1.0, 0.58431375026702881,
0.58431375026702881)], 'green': [(0.0, 0.0, 0.0),
(0.10000000149011612, 0.18823529779911041,
0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541),
(0.30000001192092896, 0.68235296010971069,
0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0,
1.0), (0.60000002384185791, 0.9529411792755127,
0.9529411792755127), (0.69999998807907104,
0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342,
0.67843139171600342), (0.89999997615814209,
0.45882353186607361, 0.45882353186607361), (1.0,
0.21176470816135406, 0.21176470816135406)], 'red':
[(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898,
0.84313726425170898), (0.20000000298023224,
0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545,
0.99215686321258545), (0.40000000596046448,
0.99607843160629272, 0.99607843160629272), (0.5, 1.0,
1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104,
0.67058825492858887, 0.67058825492858887),
(0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209,
0.27058824896812439, 0.27058824896812439), (1.0,
0.19215686619281769, 0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
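# Editor's sketch (an addition, not part of the original module): the *_data
# dicts above are segment tables for matplotlib's LinearSegmentedColormap.
# Each channel key ('red', 'green', 'blue') maps to a list of (x, y0, y1)
# anchor tuples with x in [0, 1]; y0 and y1 are the channel values approached
# from below and above x, so y0 == y1 gives a continuous ramp.  A minimal,
# hypothetical helper showing how one of these tables becomes a usable
# colormap (assumes matplotlib is importable; _demo_build_colormap is not
# called anywhere in this module):
def _demo_build_colormap():
    from matplotlib.colors import LinearSegmentedColormap
    # Build a 256-entry lookup table from the PuBu segment data defined above.
    cmap = LinearSegmentedColormap('PuBu', _PuBu_data, N=256)
    # Evaluating the colormap at a normalised position returns an RGBA tuple.
    return cmap(0.5)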
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
_gist_earth_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.18039216101169586, 0.18039216101169586), (0.0084033617749810219,
0.22745098173618317, 0.22745098173618317), (0.012605042196810246,
0.27058824896812439, 0.27058824896812439), (0.016806723549962044,
0.31764706969261169, 0.31764706969261169), (0.021008403971791267,
0.36078432202339172, 0.36078432202339172), (0.025210084393620491,
0.40784314274787903, 0.40784314274787903), (0.029411764815449715,
0.45490196347236633, 0.45490196347236633), (0.033613447099924088,
0.45490196347236633, 0.45490196347236633), (0.037815127521753311,
0.45490196347236633, 0.45490196347236633), (0.042016807943582535,
0.45490196347236633, 0.45490196347236633), (0.046218488365411758,
0.45490196347236633, 0.45490196347236633), (0.050420168787240982,
0.45882353186607361, 0.45882353186607361), (0.054621849209070206,
0.45882353186607361, 0.45882353186607361), (0.058823529630899429,
0.45882353186607361, 0.45882353186607361), (0.063025213778018951,
0.45882353186607361, 0.45882353186607361), (0.067226894199848175,
0.45882353186607361, 0.45882353186607361), (0.071428574621677399,
0.46274510025978088, 0.46274510025978088), (0.075630255043506622,
0.46274510025978088, 0.46274510025978088), (0.079831935465335846,
0.46274510025978088, 0.46274510025978088), (0.08403361588716507,
0.46274510025978088, 0.46274510025978088), (0.088235296308994293,
0.46274510025978088, 0.46274510025978088), (0.092436976730823517,
0.46666666865348816, 0.46666666865348816), (0.09663865715265274,
0.46666666865348816, 0.46666666865348816), (0.10084033757448196,
0.46666666865348816, 0.46666666865348816), (0.10504201799631119,
0.46666666865348816, 0.46666666865348816), (0.10924369841814041,
0.46666666865348816, 0.46666666865348816), (0.11344537883996964,
0.47058823704719543, 0.47058823704719543), (0.11764705926179886,
0.47058823704719543, 0.47058823704719543), (0.12184873968362808,
0.47058823704719543, 0.47058823704719543), (0.1260504275560379,
0.47058823704719543, 0.47058823704719543), (0.13025210797786713,
0.47058823704719543, 0.47058823704719543), (0.13445378839969635,
0.47450980544090271, 0.47450980544090271), (0.13865546882152557,
0.47450980544090271, 0.47450980544090271), (0.1428571492433548,
0.47450980544090271, 0.47450980544090271), (0.14705882966518402,
0.47450980544090271, 0.47450980544090271), (0.15126051008701324,
0.47450980544090271, 0.47450980544090271), (0.15546219050884247,
0.47843137383460999, 0.47843137383460999), (0.15966387093067169,
0.47843137383460999, 0.47843137383460999), (0.16386555135250092,
0.47843137383460999, 0.47843137383460999), (0.16806723177433014,
0.47843137383460999, 0.47843137383460999), (0.17226891219615936,
0.47843137383460999, 0.47843137383460999), (0.17647059261798859,
0.48235294222831726, 0.48235294222831726), (0.18067227303981781,
0.48235294222831726, 0.48235294222831726), (0.18487395346164703,
0.48235294222831726, 0.48235294222831726), (0.18907563388347626,
0.48235294222831726, 0.48235294222831726), (0.19327731430530548,
0.48235294222831726, 0.48235294222831726), (0.1974789947271347,
0.48627451062202454, 0.48627451062202454), (0.20168067514896393,
0.48627451062202454, 0.48627451062202454), (0.20588235557079315,
0.48627451062202454, 0.48627451062202454), (0.21008403599262238,
0.48627451062202454, 0.48627451062202454), (0.2142857164144516,
0.48627451062202454, 0.48627451062202454), (0.21848739683628082,
0.49019607901573181, 0.49019607901573181), (0.22268907725811005,
0.49019607901573181, 0.49019607901573181), (0.22689075767993927,
0.49019607901573181, 0.49019607901573181), (0.23109243810176849,
0.49019607901573181, 0.49019607901573181), (0.23529411852359772,
0.49019607901573181, 0.49019607901573181), (0.23949579894542694,
0.49411764740943909, 0.49411764740943909), (0.24369747936725616,
0.49411764740943909, 0.49411764740943909), (0.24789915978908539,
0.49411764740943909, 0.49411764740943909), (0.25210085511207581,
0.49411764740943909, 0.49411764740943909), (0.25630253553390503,
0.49411764740943909, 0.49411764740943909), (0.26050421595573425,
0.49803921580314636, 0.49803921580314636), (0.26470589637756348,
0.49803921580314636, 0.49803921580314636), (0.2689075767993927,
0.49803921580314636, 0.49803921580314636), (0.27310925722122192,
0.49803921580314636, 0.49803921580314636), (0.27731093764305115,
0.49803921580314636, 0.49803921580314636), (0.28151261806488037,
0.50196081399917603, 0.50196081399917603), (0.28571429848670959,
0.49411764740943909, 0.49411764740943909), (0.28991597890853882,
0.49019607901573181, 0.49019607901573181), (0.29411765933036804,
0.48627451062202454, 0.48627451062202454), (0.29831933975219727,
0.48235294222831726, 0.48235294222831726), (0.30252102017402649,
0.47843137383460999, 0.47843137383460999), (0.30672270059585571,
0.47058823704719543, 0.47058823704719543), (0.31092438101768494,
0.46666666865348816, 0.46666666865348816), (0.31512606143951416,
0.46274510025978088, 0.46274510025978088), (0.31932774186134338,
0.45882353186607361, 0.45882353186607361), (0.32352942228317261,
0.45098039507865906, 0.45098039507865906), (0.32773110270500183,
0.44705882668495178, 0.44705882668495178), (0.33193278312683105,
0.44313725829124451, 0.44313725829124451), (0.33613446354866028,
0.43529412150382996, 0.43529412150382996), (0.3403361439704895,
0.43137255311012268, 0.43137255311012268), (0.34453782439231873,
0.42745098471641541, 0.42745098471641541), (0.34873950481414795,
0.42352941632270813, 0.42352941632270813), (0.35294118523597717,
0.41568627953529358, 0.41568627953529358), (0.3571428656578064,
0.4117647111415863, 0.4117647111415863), (0.36134454607963562,
0.40784314274787903, 0.40784314274787903), (0.36554622650146484,
0.40000000596046448, 0.40000000596046448), (0.36974790692329407,
0.3960784375667572, 0.3960784375667572), (0.37394958734512329,
0.39215686917304993, 0.39215686917304993), (0.37815126776695251,
0.38431373238563538, 0.38431373238563538), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.37647059559822083, 0.37647059559822083), (0.39075630903244019,
0.36862745881080627, 0.36862745881080627), (0.39495798945426941,
0.364705890417099, 0.364705890417099), (0.39915966987609863,
0.36078432202339172, 0.36078432202339172), (0.40336135029792786,
0.35294118523597717, 0.35294118523597717), (0.40756303071975708,
0.3490196168422699, 0.3490196168422699), (0.4117647111415863,
0.34509804844856262, 0.34509804844856262), (0.41596639156341553,
0.33725491166114807, 0.33725491166114807), (0.42016807198524475,
0.3333333432674408, 0.3333333432674408), (0.42436975240707397,
0.32941177487373352, 0.32941177487373352), (0.4285714328289032,
0.32156863808631897, 0.32156863808631897), (0.43277311325073242,
0.31764706969261169, 0.31764706969261169), (0.43697479367256165,
0.31372550129890442, 0.31372550129890442), (0.44117647409439087,
0.30588236451148987, 0.30588236451148987), (0.44537815451622009,
0.30196079611778259, 0.30196079611778259), (0.44957983493804932,
0.29803922772407532, 0.29803922772407532), (0.45378151535987854,
0.29019609093666077, 0.29019609093666077), (0.45798319578170776,
0.28627452254295349, 0.28627452254295349), (0.46218487620353699,
0.27843138575553894, 0.27843138575553894), (0.46638655662536621,
0.27450981736183167, 0.27450981736183167), (0.47058823704719543,
0.27843138575553894, 0.27843138575553894), (0.47478991746902466,
0.28235295414924622, 0.28235295414924622), (0.47899159789085388,
0.28235295414924622, 0.28235295414924622), (0.48319327831268311,
0.28627452254295349, 0.28627452254295349), (0.48739495873451233,
0.28627452254295349, 0.28627452254295349), (0.49159663915634155,
0.29019609093666077, 0.29019609093666077), (0.49579831957817078,
0.29411765933036804, 0.29411765933036804), (0.5, 0.29411765933036804,
0.29411765933036804), (0.50420171022415161, 0.29803922772407532,
0.29803922772407532), (0.50840336084365845, 0.29803922772407532,
0.29803922772407532), (0.51260507106781006, 0.30196079611778259,
0.30196079611778259), (0.51680672168731689, 0.30196079611778259,
0.30196079611778259), (0.52100843191146851, 0.30588236451148987,
0.30588236451148987), (0.52521008253097534, 0.30980393290519714,
0.30980393290519714), (0.52941179275512695, 0.30980393290519714,
0.30980393290519714), (0.53361344337463379, 0.31372550129890442,
0.31372550129890442), (0.5378151535987854, 0.31372550129890442,
0.31372550129890442), (0.54201680421829224, 0.31764706969261169,
0.31764706969261169), (0.54621851444244385, 0.32156863808631897,
0.32156863808631897), (0.55042016506195068, 0.32156863808631897,
0.32156863808631897), (0.55462187528610229, 0.32156863808631897,
0.32156863808631897), (0.55882352590560913, 0.32549020648002625,
0.32549020648002625), (0.56302523612976074, 0.32549020648002625,
0.32549020648002625), (0.56722688674926758, 0.32549020648002625,
0.32549020648002625), (0.57142859697341919, 0.32941177487373352,
0.32941177487373352), (0.57563024759292603, 0.32941177487373352,
0.32941177487373352), (0.57983195781707764, 0.32941177487373352,
0.32941177487373352), (0.58403360843658447, 0.3333333432674408,
0.3333333432674408), (0.58823531866073608, 0.3333333432674408,
0.3333333432674408), (0.59243696928024292, 0.3333333432674408,
0.3333333432674408), (0.59663867950439453, 0.33725491166114807,
0.33725491166114807), (0.60084033012390137, 0.33725491166114807,
0.33725491166114807), (0.60504204034805298, 0.33725491166114807,
0.33725491166114807), (0.60924369096755981, 0.34117648005485535,
0.34117648005485535), (0.61344540119171143, 0.34117648005485535,
0.34117648005485535), (0.61764705181121826, 0.34117648005485535,
0.34117648005485535), (0.62184876203536987, 0.34509804844856262,
0.34509804844856262), (0.62605041265487671, 0.34509804844856262,
0.34509804844856262), (0.63025212287902832, 0.34509804844856262,
0.34509804844856262), (0.63445377349853516, 0.3490196168422699,
0.3490196168422699), (0.63865548372268677, 0.3490196168422699,
0.3490196168422699), (0.6428571343421936, 0.3490196168422699,
0.3490196168422699), (0.64705884456634521, 0.35294118523597717,
0.35294118523597717), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.35294118523597717,
0.35294118523597717), (0.6596638560295105, 0.35686275362968445,
0.35686275362968445), (0.66386556625366211, 0.35686275362968445,
0.35686275362968445), (0.66806721687316895, 0.35686275362968445,
0.35686275362968445), (0.67226892709732056, 0.36078432202339172,
0.36078432202339172), (0.67647057771682739, 0.36078432202339172,
0.36078432202339172), (0.680672287940979, 0.36078432202339172,
0.36078432202339172), (0.68487393856048584, 0.364705890417099,
0.364705890417099), (0.68907564878463745, 0.364705890417099,
0.364705890417099), (0.69327729940414429, 0.364705890417099,
0.364705890417099), (0.6974790096282959, 0.36862745881080627,
0.36862745881080627), (0.70168066024780273, 0.36862745881080627,
0.36862745881080627), (0.70588237047195435, 0.36862745881080627,
0.36862745881080627), (0.71008402109146118, 0.37254902720451355,
0.37254902720451355), (0.71428573131561279, 0.37254902720451355,
0.37254902720451355), (0.71848738193511963, 0.37254902720451355,
0.37254902720451355), (0.72268909215927124, 0.37647059559822083,
0.37647059559822083), (0.72689074277877808, 0.37647059559822083,
0.37647059559822083), (0.73109245300292969, 0.3803921639919281,
0.3803921639919281), (0.73529410362243652, 0.3803921639919281,
0.3803921639919281), (0.73949581384658813, 0.3803921639919281,
0.3803921639919281), (0.74369746446609497, 0.38431373238563538,
0.38431373238563538), (0.74789917469024658, 0.38431373238563538,
0.38431373238563538), (0.75210082530975342, 0.38431373238563538,
0.38431373238563538), (0.75630253553390503, 0.38823530077934265,
0.38823530077934265), (0.76050418615341187, 0.38823530077934265,
0.38823530077934265), (0.76470589637756348, 0.38823530077934265,
0.38823530077934265), (0.76890754699707031, 0.39215686917304993,
0.39215686917304993), (0.77310925722122192, 0.39215686917304993,
0.39215686917304993), (0.77731090784072876, 0.39215686917304993,
0.39215686917304993), (0.78151261806488037, 0.3960784375667572,
0.3960784375667572), (0.78571426868438721, 0.3960784375667572,
0.3960784375667572), (0.78991597890853882, 0.40784314274787903,
0.40784314274787903), (0.79411762952804565, 0.41568627953529358,
0.41568627953529358), (0.79831933975219727, 0.42352941632270813,
0.42352941632270813), (0.8025209903717041, 0.43529412150382996,
0.43529412150382996), (0.80672270059585571, 0.44313725829124451,
0.44313725829124451), (0.81092435121536255, 0.45490196347236633,
0.45490196347236633), (0.81512606143951416, 0.46274510025978088,
0.46274510025978088), (0.819327712059021, 0.47450980544090271,
0.47450980544090271), (0.82352942228317261, 0.48235294222831726,
0.48235294222831726), (0.82773107290267944, 0.49411764740943909,
0.49411764740943909), (0.83193278312683105, 0.5058823823928833,
0.5058823823928833), (0.83613443374633789, 0.51372551918029785,
0.51372551918029785), (0.8403361439704895, 0.52549022436141968,
0.52549022436141968), (0.84453779458999634, 0.5372549295425415,
0.5372549295425415), (0.84873950481414795, 0.54509806632995605,
0.54509806632995605), (0.85294115543365479, 0.55686277151107788,
0.55686277151107788), (0.8571428656578064, 0.56862747669219971,
0.56862747669219971), (0.86134451627731323, 0.58039218187332153,
0.58039218187332153), (0.86554622650146484, 0.58823531866073608,
0.58823531866073608), (0.86974787712097168, 0.60000002384185791,
0.60000002384185791), (0.87394958734512329, 0.61176472902297974,
0.61176472902297974), (0.87815123796463013, 0.62352943420410156,
0.62352943420410156), (0.88235294818878174, 0.63529413938522339,
0.63529413938522339), (0.88655459880828857, 0.64705884456634521,
0.64705884456634521), (0.89075630903244019, 0.65882354974746704,
0.65882354974746704), (0.89495795965194702, 0.66666668653488159,
0.66666668653488159), (0.89915966987609863, 0.67843139171600342,
0.67843139171600342), (0.90336132049560547, 0.69019609689712524,
0.69019609689712524), (0.90756303071975708, 0.70196080207824707,
0.70196080207824707), (0.91176468133926392, 0.7137255072593689,
0.7137255072593689), (0.91596639156341553, 0.72549021244049072,
0.72549021244049072), (0.92016804218292236, 0.74117648601531982,
0.74117648601531982), (0.92436975240707397, 0.75294119119644165,
0.75294119119644165), (0.92857140302658081, 0.76470589637756348,
0.76470589637756348), (0.93277311325073242, 0.7764706015586853,
0.7764706015586853), (0.93697476387023926, 0.78823530673980713,
0.78823530673980713), (0.94117647409439087, 0.80000001192092896,
0.80000001192092896), (0.94537812471389771, 0.81176471710205078,
0.81176471710205078), (0.94957983493804932, 0.82745099067687988,
0.82745099067687988), (0.95378148555755615, 0.83921569585800171,
0.83921569585800171), (0.95798319578170776, 0.85098040103912354,
0.85098040103912354), (0.9621848464012146, 0.86274510622024536,
0.86274510622024536), (0.96638655662536621, 0.87843137979507446,
0.87843137979507446), (0.97058820724487305, 0.89019608497619629,
0.89019608497619629), (0.97478991746902466, 0.90196079015731812,
0.90196079015731812), (0.97899156808853149, 0.91764706373214722,
0.91764706373214722), (0.98319327831268311, 0.92941176891326904,
0.92941176891326904), (0.98739492893218994, 0.94509804248809814,
0.94509804248809814), (0.99159663915634155, 0.95686274766921997,
0.95686274766921997), (0.99579828977584839, 0.97254902124404907,
0.97254902124404907), (1.0, 0.9843137264251709, 0.9843137264251709)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.011764706112444401, 0.011764706112444401),
(0.037815127521753311, 0.023529412224888802, 0.023529412224888802),
(0.042016807943582535, 0.031372550874948502, 0.031372550874948502),
(0.046218488365411758, 0.043137256056070328, 0.043137256056070328),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.062745101749897003, 0.062745101749897003),
(0.058823529630899429, 0.070588238537311554, 0.070588238537311554),
(0.063025213778018951, 0.08235294371843338, 0.08235294371843338),
(0.067226894199848175, 0.090196080505847931, 0.090196080505847931),
(0.071428574621677399, 0.10196078568696976, 0.10196078568696976),
(0.075630255043506622, 0.10980392247438431, 0.10980392247438431),
(0.079831935465335846, 0.12156862765550613, 0.12156862765550613),
(0.08403361588716507, 0.12941177189350128, 0.12941177189350128),
(0.088235296308994293, 0.14117647707462311, 0.14117647707462311),
(0.092436976730823517, 0.14901961386203766, 0.14901961386203766),
(0.09663865715265274, 0.16078431904315948, 0.16078431904315948),
(0.10084033757448196, 0.16862745583057404, 0.16862745583057404),
(0.10504201799631119, 0.17647059261798859, 0.17647059261798859),
(0.10924369841814041, 0.18823529779911041, 0.18823529779911041),
(0.11344537883996964, 0.19607843458652496, 0.19607843458652496),
(0.11764705926179886, 0.20392157137393951, 0.20392157137393951),
(0.12184873968362808, 0.21568627655506134, 0.21568627655506134),
(0.1260504275560379, 0.22352941334247589, 0.22352941334247589),
(0.13025210797786713, 0.23137255012989044, 0.23137255012989044),
(0.13445378839969635, 0.23921568691730499, 0.23921568691730499),
(0.13865546882152557, 0.25098040699958801, 0.25098040699958801),
(0.1428571492433548, 0.25882354378700256, 0.25882354378700256),
(0.14705882966518402, 0.26666668057441711, 0.26666668057441711),
(0.15126051008701324, 0.27450981736183167, 0.27450981736183167),
(0.15546219050884247, 0.28235295414924622, 0.28235295414924622),
(0.15966387093067169, 0.29019609093666077, 0.29019609093666077),
(0.16386555135250092, 0.30196079611778259, 0.30196079611778259),
(0.16806723177433014, 0.30980393290519714, 0.30980393290519714),
(0.17226891219615936, 0.31764706969261169, 0.31764706969261169),
(0.17647059261798859, 0.32549020648002625, 0.32549020648002625),
(0.18067227303981781, 0.3333333432674408, 0.3333333432674408),
(0.18487395346164703, 0.34117648005485535, 0.34117648005485535),
(0.18907563388347626, 0.3490196168422699, 0.3490196168422699),
(0.19327731430530548, 0.35686275362968445, 0.35686275362968445),
(0.1974789947271347, 0.364705890417099, 0.364705890417099),
(0.20168067514896393, 0.37254902720451355, 0.37254902720451355),
(0.20588235557079315, 0.3803921639919281, 0.3803921639919281),
(0.21008403599262238, 0.38823530077934265, 0.38823530077934265),
(0.2142857164144516, 0.39215686917304993, 0.39215686917304993),
(0.21848739683628082, 0.40000000596046448, 0.40000000596046448),
(0.22268907725811005, 0.40784314274787903, 0.40784314274787903),
(0.22689075767993927, 0.41568627953529358, 0.41568627953529358),
(0.23109243810176849, 0.42352941632270813, 0.42352941632270813),
(0.23529411852359772, 0.42745098471641541, 0.42745098471641541),
(0.23949579894542694, 0.43529412150382996, 0.43529412150382996),
(0.24369747936725616, 0.44313725829124451, 0.44313725829124451),
(0.24789915978908539, 0.45098039507865906, 0.45098039507865906),
(0.25210085511207581, 0.45490196347236633, 0.45490196347236633),
(0.25630253553390503, 0.46274510025978088, 0.46274510025978088),
(0.26050421595573425, 0.47058823704719543, 0.47058823704719543),
(0.26470589637756348, 0.47450980544090271, 0.47450980544090271),
(0.2689075767993927, 0.48235294222831726, 0.48235294222831726),
(0.27310925722122192, 0.49019607901573181, 0.49019607901573181),
(0.27731093764305115, 0.49411764740943909, 0.49411764740943909),
(0.28151261806488037, 0.50196081399917603, 0.50196081399917603),
(0.28571429848670959, 0.50196081399917603, 0.50196081399917603),
(0.28991597890853882, 0.5058823823928833, 0.5058823823928833),
(0.29411765933036804, 0.5058823823928833, 0.5058823823928833),
(0.29831933975219727, 0.50980395078659058, 0.50980395078659058),
(0.30252102017402649, 0.51372551918029785, 0.51372551918029785),
(0.30672270059585571, 0.51372551918029785, 0.51372551918029785),
(0.31092438101768494, 0.51764708757400513, 0.51764708757400513),
(0.31512606143951416, 0.5215686559677124, 0.5215686559677124),
(0.31932774186134338, 0.5215686559677124, 0.5215686559677124),
(0.32352942228317261, 0.52549022436141968, 0.52549022436141968),
(0.32773110270500183, 0.52549022436141968, 0.52549022436141968),
(0.33193278312683105, 0.52941179275512695, 0.52941179275512695),
(0.33613446354866028, 0.53333336114883423, 0.53333336114883423),
(0.3403361439704895, 0.53333336114883423, 0.53333336114883423),
(0.34453782439231873, 0.5372549295425415, 0.5372549295425415),
(0.34873950481414795, 0.54117649793624878, 0.54117649793624878),
(0.35294118523597717, 0.54117649793624878, 0.54117649793624878),
(0.3571428656578064, 0.54509806632995605, 0.54509806632995605),
(0.36134454607963562, 0.54901963472366333, 0.54901963472366333),
(0.36554622650146484, 0.54901963472366333, 0.54901963472366333),
(0.36974790692329407, 0.55294120311737061, 0.55294120311737061),
(0.37394958734512329, 0.55294120311737061, 0.55294120311737061),
(0.37815126776695251, 0.55686277151107788, 0.55686277151107788),
(0.38235294818878174, 0.56078433990478516, 0.56078433990478516),
(0.38655462861061096, 0.56078433990478516, 0.56078433990478516),
(0.39075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.39495798945426941, 0.56862747669219971, 0.56862747669219971),
(0.39915966987609863, 0.56862747669219971, 0.56862747669219971),
(0.40336135029792786, 0.57254904508590698, 0.57254904508590698),
(0.40756303071975708, 0.57254904508590698, 0.57254904508590698),
(0.4117647111415863, 0.57647061347961426, 0.57647061347961426),
(0.41596639156341553, 0.58039218187332153, 0.58039218187332153),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.58431375026702881, 0.58431375026702881),
(0.4285714328289032, 0.58823531866073608, 0.58823531866073608),
(0.43277311325073242, 0.58823531866073608, 0.58823531866073608),
(0.43697479367256165, 0.59215688705444336, 0.59215688705444336),
(0.44117647409439087, 0.59215688705444336, 0.59215688705444336),
(0.44537815451622009, 0.59607845544815063, 0.59607845544815063),
(0.44957983493804932, 0.60000002384185791, 0.60000002384185791),
(0.45378151535987854, 0.60000002384185791, 0.60000002384185791),
(0.45798319578170776, 0.60392159223556519, 0.60392159223556519),
(0.46218487620353699, 0.60784316062927246, 0.60784316062927246),
(0.46638655662536621, 0.60784316062927246, 0.60784316062927246),
(0.47058823704719543, 0.61176472902297974, 0.61176472902297974),
(0.47478991746902466, 0.61176472902297974, 0.61176472902297974),
(0.47899159789085388, 0.61568629741668701, 0.61568629741668701),
(0.48319327831268311, 0.61960786581039429, 0.61960786581039429),
(0.48739495873451233, 0.61960786581039429, 0.61960786581039429),
(0.49159663915634155, 0.62352943420410156, 0.62352943420410156),
(0.49579831957817078, 0.62745100259780884, 0.62745100259780884), (0.5,
0.62745100259780884, 0.62745100259780884), (0.50420171022415161,
0.63137257099151611, 0.63137257099151611), (0.50840336084365845,
0.63137257099151611, 0.63137257099151611), (0.51260507106781006,
0.63529413938522339, 0.63529413938522339), (0.51680672168731689,
0.63921570777893066, 0.63921570777893066), (0.52100843191146851,
0.63921570777893066, 0.63921570777893066), (0.52521008253097534,
0.64313727617263794, 0.64313727617263794), (0.52941179275512695,
0.64705884456634521, 0.64705884456634521), (0.53361344337463379,
0.64705884456634521, 0.64705884456634521), (0.5378151535987854,
0.65098041296005249, 0.65098041296005249), (0.54201680421829224,
0.65098041296005249, 0.65098041296005249), (0.54621851444244385,
0.65490198135375977, 0.65490198135375977), (0.55042016506195068,
0.65882354974746704, 0.65882354974746704), (0.55462187528610229,
0.65882354974746704, 0.65882354974746704), (0.55882352590560913,
0.65882354974746704, 0.65882354974746704), (0.56302523612976074,
0.66274511814117432, 0.66274511814117432), (0.56722688674926758,
0.66274511814117432, 0.66274511814117432), (0.57142859697341919,
0.66666668653488159, 0.66666668653488159), (0.57563024759292603,
0.66666668653488159, 0.66666668653488159), (0.57983195781707764,
0.67058825492858887, 0.67058825492858887), (0.58403360843658447,
0.67058825492858887, 0.67058825492858887), (0.58823531866073608,
0.67450982332229614, 0.67450982332229614), (0.59243696928024292,
0.67450982332229614, 0.67450982332229614), (0.59663867950439453,
0.67450982332229614, 0.67450982332229614), (0.60084033012390137,
0.67843139171600342, 0.67843139171600342), (0.60504204034805298,
0.67843139171600342, 0.67843139171600342), (0.60924369096755981,
0.68235296010971069, 0.68235296010971069), (0.61344540119171143,
0.68235296010971069, 0.68235296010971069), (0.61764705181121826,
0.68627452850341797, 0.68627452850341797), (0.62184876203536987,
0.68627452850341797, 0.68627452850341797), (0.62605041265487671,
0.68627452850341797, 0.68627452850341797), (0.63025212287902832,
0.69019609689712524, 0.69019609689712524), (0.63445377349853516,
0.69019609689712524, 0.69019609689712524), (0.63865548372268677,
0.69411766529083252, 0.69411766529083252), (0.6428571343421936,
0.69411766529083252, 0.69411766529083252), (0.64705884456634521,
0.69803923368453979, 0.69803923368453979), (0.65126049518585205,
0.69803923368453979, 0.69803923368453979), (0.65546220541000366,
0.70196080207824707, 0.70196080207824707), (0.6596638560295105,
0.70196080207824707, 0.70196080207824707), (0.66386556625366211,
0.70196080207824707, 0.70196080207824707), (0.66806721687316895,
0.70588237047195435, 0.70588237047195435), (0.67226892709732056,
0.70588237047195435, 0.70588237047195435), (0.67647057771682739,
0.70980393886566162, 0.70980393886566162), (0.680672287940979,
0.70980393886566162, 0.70980393886566162), (0.68487393856048584,
0.7137255072593689, 0.7137255072593689), (0.68907564878463745,
0.7137255072593689, 0.7137255072593689), (0.69327729940414429,
0.71764707565307617, 0.71764707565307617), (0.6974790096282959,
0.71764707565307617, 0.71764707565307617), (0.70168066024780273,
0.7137255072593689, 0.7137255072593689), (0.70588237047195435,
0.70980393886566162, 0.70980393886566162), (0.71008402109146118,
0.70980393886566162, 0.70980393886566162), (0.71428573131561279,
0.70588237047195435, 0.70588237047195435), (0.71848738193511963,
0.70196080207824707, 0.70196080207824707), (0.72268909215927124,
0.69803923368453979, 0.69803923368453979), (0.72689074277877808,
0.69411766529083252, 0.69411766529083252), (0.73109245300292969,
0.69019609689712524, 0.69019609689712524), (0.73529410362243652,
0.68627452850341797, 0.68627452850341797), (0.73949581384658813,
0.68235296010971069, 0.68235296010971069), (0.74369746446609497,
0.67843139171600342, 0.67843139171600342), (0.74789917469024658,
0.67450982332229614, 0.67450982332229614), (0.75210082530975342,
0.67058825492858887, 0.67058825492858887), (0.75630253553390503,
0.66666668653488159, 0.66666668653488159), (0.76050418615341187,
0.66274511814117432, 0.66274511814117432), (0.76470589637756348,
0.65882354974746704, 0.65882354974746704), (0.76890754699707031,
0.65490198135375977, 0.65490198135375977), (0.77310925722122192,
0.65098041296005249, 0.65098041296005249), (0.77731090784072876,
0.64705884456634521, 0.64705884456634521), (0.78151261806488037,
0.64313727617263794, 0.64313727617263794), (0.78571426868438721,
0.63921570777893066, 0.63921570777893066), (0.78991597890853882,
0.63921570777893066, 0.63921570777893066), (0.79411762952804565,
0.64313727617263794, 0.64313727617263794), (0.79831933975219727,
0.64313727617263794, 0.64313727617263794), (0.8025209903717041,
0.64705884456634521, 0.64705884456634521), (0.80672270059585571,
0.64705884456634521, 0.64705884456634521), (0.81092435121536255,
0.65098041296005249, 0.65098041296005249), (0.81512606143951416,
0.65490198135375977, 0.65490198135375977), (0.819327712059021,
0.65490198135375977, 0.65490198135375977), (0.82352942228317261,
0.65882354974746704, 0.65882354974746704), (0.82773107290267944,
0.66274511814117432, 0.66274511814117432), (0.83193278312683105,
0.66666668653488159, 0.66666668653488159), (0.83613443374633789,
0.67058825492858887, 0.67058825492858887), (0.8403361439704895,
0.67450982332229614, 0.67450982332229614), (0.84453779458999634,
0.67843139171600342, 0.67843139171600342), (0.84873950481414795,
0.68235296010971069, 0.68235296010971069), (0.85294115543365479,
0.68627452850341797, 0.68627452850341797), (0.8571428656578064,
0.69019609689712524, 0.69019609689712524), (0.86134451627731323,
0.69411766529083252, 0.69411766529083252), (0.86554622650146484,
0.69803923368453979, 0.69803923368453979), (0.86974787712097168,
0.70196080207824707, 0.70196080207824707), (0.87394958734512329,
0.70980393886566162, 0.70980393886566162), (0.87815123796463013,
0.7137255072593689, 0.7137255072593689), (0.88235294818878174,
0.72156864404678345, 0.72156864404678345), (0.88655459880828857,
0.72549021244049072, 0.72549021244049072), (0.89075630903244019,
0.73333334922790527, 0.73333334922790527), (0.89495795965194702,
0.73725491762161255, 0.73725491762161255), (0.89915966987609863,
0.7450980544090271, 0.7450980544090271), (0.90336132049560547,
0.75294119119644165, 0.75294119119644165), (0.90756303071975708,
0.7607843279838562, 0.7607843279838562), (0.91176468133926392,
0.76862746477127075, 0.76862746477127075), (0.91596639156341553,
0.7764706015586853, 0.7764706015586853), (0.92016804218292236,
0.78431373834609985, 0.78431373834609985), (0.92436975240707397,
0.7921568751335144, 0.7921568751335144), (0.92857140302658081,
0.80000001192092896, 0.80000001192092896), (0.93277311325073242,
0.80784314870834351, 0.80784314870834351), (0.93697476387023926,
0.81568628549575806, 0.81568628549575806), (0.94117647409439087,
0.82745099067687988, 0.82745099067687988), (0.94537812471389771,
0.83529412746429443, 0.83529412746429443), (0.94957983493804932,
0.84313726425170898, 0.84313726425170898), (0.95378148555755615,
0.85490196943283081, 0.85490196943283081), (0.95798319578170776,
0.86666667461395264, 0.86666667461395264), (0.9621848464012146,
0.87450981140136719, 0.87450981140136719), (0.96638655662536621,
0.88627451658248901, 0.88627451658248901), (0.97058820724487305,
0.89803922176361084, 0.89803922176361084), (0.97478991746902466,
0.90980392694473267, 0.90980392694473267), (0.97899156808853149,
0.92156863212585449, 0.92156863212585449), (0.98319327831268311,
0.93333333730697632, 0.93333333730697632), (0.98739492893218994,
0.94509804248809814, 0.94509804248809814), (0.99159663915634155,
0.95686274766921997, 0.95686274766921997), (0.99579828977584839,
0.97254902124404907, 0.97254902124404907), (1.0, 0.9843137264251709,
0.9843137264251709)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0, 0.0), (0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0,
0.0), (0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.0, 0.0), (0.037815127521753311,
0.0039215688593685627, 0.0039215688593685627), (0.042016807943582535,
0.0078431377187371254, 0.0078431377187371254), (0.046218488365411758,
0.0078431377187371254, 0.0078431377187371254), (0.050420168787240982,
0.011764706112444401, 0.011764706112444401), (0.054621849209070206,
0.015686275437474251, 0.015686275437474251), (0.058823529630899429,
0.019607843831181526, 0.019607843831181526), (0.063025213778018951,
0.019607843831181526, 0.019607843831181526), (0.067226894199848175,
0.023529412224888802, 0.023529412224888802), (0.071428574621677399,
0.027450980618596077, 0.027450980618596077), (0.075630255043506622,
0.031372550874948502, 0.031372550874948502), (0.079831935465335846,
0.031372550874948502, 0.031372550874948502), (0.08403361588716507,
0.035294119268655777, 0.035294119268655777), (0.088235296308994293,
0.039215687662363052, 0.039215687662363052), (0.092436976730823517,
0.043137256056070328, 0.043137256056070328), (0.09663865715265274,
0.043137256056070328, 0.043137256056070328), (0.10084033757448196,
0.047058824449777603, 0.047058824449777603), (0.10504201799631119,
0.050980392843484879, 0.050980392843484879), (0.10924369841814041,
0.054901961237192154, 0.054901961237192154), (0.11344537883996964,
0.058823529630899429, 0.058823529630899429), (0.11764705926179886,
0.058823529630899429, 0.058823529630899429), (0.12184873968362808,
0.062745101749897003, 0.062745101749897003), (0.1260504275560379,
0.066666670143604279, 0.066666670143604279), (0.13025210797786713,
0.070588238537311554, 0.070588238537311554), (0.13445378839969635,
0.070588238537311554, 0.070588238537311554), (0.13865546882152557,
0.074509806931018829, 0.074509806931018829), (0.1428571492433548,
0.078431375324726105, 0.078431375324726105), (0.14705882966518402,
0.08235294371843338, 0.08235294371843338), (0.15126051008701324,
0.086274512112140656, 0.086274512112140656), (0.15546219050884247,
0.086274512112140656, 0.086274512112140656), (0.15966387093067169,
0.090196080505847931, 0.090196080505847931), (0.16386555135250092,
0.094117648899555206, 0.094117648899555206), (0.16806723177433014,
0.098039217293262482, 0.098039217293262482), (0.17226891219615936,
0.10196078568696976, 0.10196078568696976), (0.17647059261798859,
0.10196078568696976, 0.10196078568696976), (0.18067227303981781,
0.10588235408067703, 0.10588235408067703), (0.18487395346164703,
0.10980392247438431, 0.10980392247438431), (0.18907563388347626,
0.11372549086809158, 0.11372549086809158), (0.19327731430530548,
0.11764705926179886, 0.11764705926179886), (0.1974789947271347,
0.12156862765550613, 0.12156862765550613), (0.20168067514896393,
0.12156862765550613, 0.12156862765550613), (0.20588235557079315,
0.12549020349979401, 0.12549020349979401), (0.21008403599262238,
0.12941177189350128, 0.12941177189350128), (0.2142857164144516,
0.13333334028720856, 0.13333334028720856), (0.21848739683628082,
0.13725490868091583, 0.13725490868091583), (0.22268907725811005,
0.14117647707462311, 0.14117647707462311), (0.22689075767993927,
0.14117647707462311, 0.14117647707462311), (0.23109243810176849,
0.14509804546833038, 0.14509804546833038), (0.23529411852359772,
0.14901961386203766, 0.14901961386203766), (0.23949579894542694,
0.15294118225574493, 0.15294118225574493), (0.24369747936725616,
0.15686275064945221, 0.15686275064945221), (0.24789915978908539,
0.16078431904315948, 0.16078431904315948), (0.25210085511207581,
0.16078431904315948, 0.16078431904315948), (0.25630253553390503,
0.16470588743686676, 0.16470588743686676), (0.26050421595573425,
0.16862745583057404, 0.16862745583057404), (0.26470589637756348,
0.17254902422428131, 0.17254902422428131), (0.2689075767993927,
0.17647059261798859, 0.17647059261798859), (0.27310925722122192,
0.18039216101169586, 0.18039216101169586), (0.27731093764305115,
0.18431372940540314, 0.18431372940540314), (0.28151261806488037,
0.18823529779911041, 0.18823529779911041), (0.28571429848670959,
0.18823529779911041, 0.18823529779911041), (0.28991597890853882,
0.18823529779911041, 0.18823529779911041), (0.29411765933036804,
0.19215686619281769, 0.19215686619281769), (0.29831933975219727,
0.19215686619281769, 0.19215686619281769), (0.30252102017402649,
0.19607843458652496, 0.19607843458652496), (0.30672270059585571,
0.19607843458652496, 0.19607843458652496), (0.31092438101768494,
0.20000000298023224, 0.20000000298023224), (0.31512606143951416,
0.20000000298023224, 0.20000000298023224), (0.31932774186134338,
0.20392157137393951, 0.20392157137393951), (0.32352942228317261,
0.20392157137393951, 0.20392157137393951), (0.32773110270500183,
0.20784313976764679, 0.20784313976764679), (0.33193278312683105,
0.20784313976764679, 0.20784313976764679), (0.33613446354866028,
0.21176470816135406, 0.21176470816135406), (0.3403361439704895,
0.21176470816135406, 0.21176470816135406), (0.34453782439231873,
0.21568627655506134, 0.21568627655506134), (0.34873950481414795,
0.21568627655506134, 0.21568627655506134), (0.35294118523597717,
0.21960784494876862, 0.21960784494876862), (0.3571428656578064,
0.21960784494876862, 0.21960784494876862), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.22352941334247589, 0.22352941334247589), (0.36974790692329407,
0.22745098173618317, 0.22745098173618317), (0.37394958734512329,
0.22745098173618317, 0.22745098173618317), (0.37815126776695251,
0.23137255012989044, 0.23137255012989044), (0.38235294818878174,
0.23137255012989044, 0.23137255012989044), (0.38655462861061096,
0.23529411852359772, 0.23529411852359772), (0.39075630903244019,
0.23921568691730499, 0.23921568691730499), (0.39495798945426941,
0.23921568691730499, 0.23921568691730499), (0.39915966987609863,
0.24313725531101227, 0.24313725531101227), (0.40336135029792786,
0.24313725531101227, 0.24313725531101227), (0.40756303071975708,
0.24705882370471954, 0.24705882370471954), (0.4117647111415863,
0.24705882370471954, 0.24705882370471954), (0.41596639156341553,
0.25098040699958801, 0.25098040699958801), (0.42016807198524475,
0.25098040699958801, 0.25098040699958801), (0.42436975240707397,
0.25490197539329529, 0.25490197539329529), (0.4285714328289032,
0.25490197539329529, 0.25490197539329529), (0.43277311325073242,
0.25882354378700256, 0.25882354378700256), (0.43697479367256165,
0.26274511218070984, 0.26274511218070984), (0.44117647409439087,
0.26274511218070984, 0.26274511218070984), (0.44537815451622009,
0.26666668057441711, 0.26666668057441711), (0.44957983493804932,
0.26666668057441711, 0.26666668057441711), (0.45378151535987854,
0.27058824896812439, 0.27058824896812439), (0.45798319578170776,
0.27058824896812439, 0.27058824896812439), (0.46218487620353699,
0.27450981736183167, 0.27450981736183167), (0.46638655662536621,
0.27843138575553894, 0.27843138575553894), (0.47058823704719543,
0.28627452254295349, 0.28627452254295349), (0.47478991746902466,
0.29803922772407532, 0.29803922772407532), (0.47899159789085388,
0.30588236451148987, 0.30588236451148987), (0.48319327831268311,
0.31764706969261169, 0.31764706969261169), (0.48739495873451233,
0.32549020648002625, 0.32549020648002625), (0.49159663915634155,
0.33725491166114807, 0.33725491166114807), (0.49579831957817078,
0.34509804844856262, 0.34509804844856262), (0.5, 0.35686275362968445,
0.35686275362968445), (0.50420171022415161, 0.36862745881080627,
0.36862745881080627), (0.50840336084365845, 0.37647059559822083,
0.37647059559822083), (0.51260507106781006, 0.38823530077934265,
0.38823530077934265), (0.51680672168731689, 0.3960784375667572,
0.3960784375667572), (0.52100843191146851, 0.40784314274787903,
0.40784314274787903), (0.52521008253097534, 0.41568627953529358,
0.41568627953529358), (0.52941179275512695, 0.42745098471641541,
0.42745098471641541), (0.53361344337463379, 0.43529412150382996,
0.43529412150382996), (0.5378151535987854, 0.44705882668495178,
0.44705882668495178), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.46666666865348816,
0.46666666865348816), (0.55042016506195068, 0.47450980544090271,
0.47450980544090271), (0.55462187528610229, 0.47843137383460999,
0.47843137383460999), (0.55882352590560913, 0.48627451062202454,
0.48627451062202454), (0.56302523612976074, 0.49411764740943909,
0.49411764740943909), (0.56722688674926758, 0.50196081399917603,
0.50196081399917603), (0.57142859697341919, 0.5058823823928833,
0.5058823823928833), (0.57563024759292603, 0.51372551918029785,
0.51372551918029785), (0.57983195781707764, 0.5215686559677124,
0.5215686559677124), (0.58403360843658447, 0.52941179275512695,
0.52941179275512695), (0.58823531866073608, 0.53333336114883423,
0.53333336114883423), (0.59243696928024292, 0.54117649793624878,
0.54117649793624878), (0.59663867950439453, 0.54901963472366333,
0.54901963472366333), (0.60084033012390137, 0.55294120311737061,
0.55294120311737061), (0.60504204034805298, 0.56078433990478516,
0.56078433990478516), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.57647061347961426,
0.57647061347961426), (0.61764705181121826, 0.58431375026702881,
0.58431375026702881), (0.62184876203536987, 0.58823531866073608,
0.58823531866073608), (0.62605041265487671, 0.59607845544815063,
0.59607845544815063), (0.63025212287902832, 0.60392159223556519,
0.60392159223556519), (0.63445377349853516, 0.61176472902297974,
0.61176472902297974), (0.63865548372268677, 0.61568629741668701,
0.61568629741668701), (0.6428571343421936, 0.62352943420410156,
0.62352943420410156), (0.64705884456634521, 0.63137257099151611,
0.63137257099151611), (0.65126049518585205, 0.63921570777893066,
0.63921570777893066), (0.65546220541000366, 0.64705884456634521,
0.64705884456634521), (0.6596638560295105, 0.65098041296005249,
0.65098041296005249), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67450982332229614,
0.67450982332229614), (0.67647057771682739, 0.68235296010971069,
0.68235296010971069), (0.680672287940979, 0.68627452850341797,
0.68627452850341797), (0.68487393856048584, 0.69411766529083252,
0.69411766529083252), (0.68907564878463745, 0.70196080207824707,
0.70196080207824707), (0.69327729940414429, 0.70980393886566162,
0.70980393886566162), (0.6974790096282959, 0.71764707565307617,
0.71764707565307617), (0.70168066024780273, 0.71764707565307617,
0.71764707565307617), (0.70588237047195435, 0.72156864404678345,
0.72156864404678345), (0.71008402109146118, 0.72156864404678345,
0.72156864404678345), (0.71428573131561279, 0.72549021244049072,
0.72549021244049072), (0.71848738193511963, 0.72549021244049072,
0.72549021244049072), (0.72268909215927124, 0.729411780834198,
0.729411780834198), (0.72689074277877808, 0.729411780834198,
0.729411780834198), (0.73109245300292969, 0.73333334922790527,
0.73333334922790527), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.73725491762161255,
0.73725491762161255), (0.75210082530975342, 0.74117648601531982,
0.74117648601531982), (0.75630253553390503, 0.74117648601531982,
0.74117648601531982), (0.76050418615341187, 0.7450980544090271,
0.7450980544090271), (0.76470589637756348, 0.7450980544090271,
0.7450980544090271), (0.76890754699707031, 0.7450980544090271,
0.7450980544090271), (0.77310925722122192, 0.74901962280273438,
0.74901962280273438), (0.77731090784072876, 0.74901962280273438,
0.74901962280273438), (0.78151261806488037, 0.75294119119644165,
0.75294119119644165), (0.78571426868438721, 0.75294119119644165,
0.75294119119644165), (0.78991597890853882, 0.75686275959014893,
0.75686275959014893), (0.79411762952804565, 0.76470589637756348,
0.76470589637756348), (0.79831933975219727, 0.76862746477127075,
0.76862746477127075), (0.8025209903717041, 0.77254903316497803,
0.77254903316497803), (0.80672270059585571, 0.7764706015586853,
0.7764706015586853), (0.81092435121536255, 0.78039216995239258,
0.78039216995239258), (0.81512606143951416, 0.78823530673980713,
0.78823530673980713), (0.819327712059021, 0.7921568751335144,
0.7921568751335144), (0.82352942228317261, 0.79607844352722168,
0.79607844352722168), (0.82773107290267944, 0.80000001192092896,
0.80000001192092896), (0.83193278312683105, 0.80392158031463623,
0.80392158031463623), (0.83613443374633789, 0.81176471710205078,
0.81176471710205078), (0.8403361439704895, 0.81568628549575806,
0.81568628549575806), (0.84453779458999634, 0.81960785388946533,
0.81960785388946533), (0.84873950481414795, 0.82352942228317261,
0.82352942228317261), (0.85294115543365479, 0.82745099067687988,
0.82745099067687988), (0.8571428656578064, 0.83529412746429443,
0.83529412746429443), (0.86134451627731323, 0.83921569585800171,
0.83921569585800171), (0.86554622650146484, 0.84313726425170898,
0.84313726425170898), (0.86974787712097168, 0.84705883264541626,
0.84705883264541626), (0.87394958734512329, 0.85098040103912354,
0.85098040103912354), (0.87815123796463013, 0.85882353782653809,
0.85882353782653809), (0.88235294818878174, 0.86274510622024536,
0.86274510622024536), (0.88655459880828857, 0.86666667461395264,
0.86666667461395264), (0.89075630903244019, 0.87058824300765991,
0.87058824300765991), (0.89495795965194702, 0.87450981140136719,
0.87450981140136719), (0.89915966987609863, 0.88235294818878174,
0.88235294818878174), (0.90336132049560547, 0.88627451658248901,
0.88627451658248901), (0.90756303071975708, 0.89019608497619629,
0.89019608497619629), (0.91176468133926392, 0.89411765336990356,
0.89411765336990356), (0.91596639156341553, 0.89803922176361084,
0.89803922176361084), (0.92016804218292236, 0.90588235855102539,
0.90588235855102539), (0.92436975240707397, 0.90980392694473267,
0.90980392694473267), (0.92857140302658081, 0.91372549533843994,
0.91372549533843994), (0.93277311325073242, 0.91764706373214722,
0.91764706373214722), (0.93697476387023926, 0.92156863212585449,
0.92156863212585449), (0.94117647409439087, 0.92941176891326904,
0.92941176891326904), (0.94537812471389771, 0.93333333730697632,
0.93333333730697632), (0.94957983493804932, 0.93725490570068359,
0.93725490570068359), (0.95378148555755615, 0.94117647409439087,
0.94117647409439087), (0.95798319578170776, 0.94509804248809814,
0.94509804248809814), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.95686274766921997,
0.95686274766921997), (0.97058820724487305, 0.96078431606292725,
0.96078431606292725), (0.97478991746902466, 0.96470588445663452,
0.96470588445663452), (0.97899156808853149, 0.9686274528503418,
0.9686274528503418), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
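# The tables in this module follow matplotlib's "segmentdata" convention: for
# each of the 'red', 'green', and 'blue' channels, a list of (x, y0, y1)
# triples with x increasing monotonically from 0.0 to 1.0.  A minimal sketch of
# how a table such as _gist_gray_data (defined below) can be turned into a
# usable colormap -- assuming matplotlib is importable; the actual construction
# and registration of these tables may be handled elsewhere in this module:
#
#   from matplotlib.colors import LinearSegmentedColormap
#   gist_gray = LinearSegmentedColormap('gist_gray', _gist_gray_data, N=256)
#   # e.g. ax.imshow(image, cmap=gist_gray)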
_gist_gray_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0039215688593685627, 0.0039215688593685627), (0.0084033617749810219,
0.0078431377187371254, 0.0078431377187371254), (0.012605042196810246,
0.011764706112444401, 0.011764706112444401), (0.016806723549962044,
0.015686275437474251, 0.015686275437474251), (0.021008403971791267,
0.019607843831181526, 0.019607843831181526), (0.025210084393620491,
0.023529412224888802, 0.023529412224888802), (0.029411764815449715,
0.027450980618596077, 0.027450980618596077), (0.033613447099924088,
0.035294119268655777, 0.035294119268655777), (0.037815127521753311,
0.039215687662363052, 0.039215687662363052), (0.042016807943582535,
0.043137256056070328, 0.043137256056070328), (0.046218488365411758,
0.047058824449777603, 0.047058824449777603), (0.050420168787240982,
0.050980392843484879, 0.050980392843484879), (0.054621849209070206,
0.054901961237192154, 0.054901961237192154), (0.058823529630899429,
0.058823529630899429, 0.058823529630899429), (0.063025213778018951,
0.062745101749897003, 0.062745101749897003), (0.067226894199848175,
0.066666670143604279, 0.066666670143604279), (0.071428574621677399,
0.070588238537311554, 0.070588238537311554), (0.075630255043506622,
0.074509806931018829, 0.074509806931018829), (0.079831935465335846,
0.078431375324726105, 0.078431375324726105), (0.08403361588716507,
0.08235294371843338, 0.08235294371843338), (0.088235296308994293,
0.086274512112140656, 0.086274512112140656), (0.092436976730823517,
0.090196080505847931, 0.090196080505847931), (0.09663865715265274,
0.098039217293262482, 0.098039217293262482), (0.10084033757448196,
0.10196078568696976, 0.10196078568696976), (0.10504201799631119,
0.10588235408067703, 0.10588235408067703), (0.10924369841814041,
0.10980392247438431, 0.10980392247438431), (0.11344537883996964,
0.11372549086809158, 0.11372549086809158), (0.11764705926179886,
0.11764705926179886, 0.11764705926179886), (0.12184873968362808,
0.12156862765550613, 0.12156862765550613), (0.1260504275560379,
0.12549020349979401, 0.12549020349979401), (0.13025210797786713,
0.12941177189350128, 0.12941177189350128), (0.13445378839969635,
0.13333334028720856, 0.13333334028720856), (0.13865546882152557,
0.13725490868091583, 0.13725490868091583), (0.1428571492433548,
0.14117647707462311, 0.14117647707462311), (0.14705882966518402,
0.14509804546833038, 0.14509804546833038), (0.15126051008701324,
0.14901961386203766, 0.14901961386203766), (0.15546219050884247,
0.15294118225574493, 0.15294118225574493), (0.15966387093067169,
0.16078431904315948, 0.16078431904315948), (0.16386555135250092,
0.16470588743686676, 0.16470588743686676), (0.16806723177433014,
0.16862745583057404, 0.16862745583057404), (0.17226891219615936,
0.17254902422428131, 0.17254902422428131), (0.17647059261798859,
0.17647059261798859, 0.17647059261798859), (0.18067227303981781,
0.18039216101169586, 0.18039216101169586), (0.18487395346164703,
0.18431372940540314, 0.18431372940540314), (0.18907563388347626,
0.18823529779911041, 0.18823529779911041), (0.19327731430530548,
0.19215686619281769, 0.19215686619281769), (0.1974789947271347,
0.19607843458652496, 0.19607843458652496), (0.20168067514896393,
0.20000000298023224, 0.20000000298023224), (0.20588235557079315,
0.20392157137393951, 0.20392157137393951), (0.21008403599262238,
0.20784313976764679, 0.20784313976764679), (0.2142857164144516,
0.21176470816135406, 0.21176470816135406), (0.21848739683628082,
0.21568627655506134, 0.21568627655506134), (0.22268907725811005,
0.22352941334247589, 0.22352941334247589), (0.22689075767993927,
0.22745098173618317, 0.22745098173618317), (0.23109243810176849,
0.23137255012989044, 0.23137255012989044), (0.23529411852359772,
0.23529411852359772, 0.23529411852359772), (0.23949579894542694,
0.23921568691730499, 0.23921568691730499), (0.24369747936725616,
0.24313725531101227, 0.24313725531101227), (0.24789915978908539,
0.24705882370471954, 0.24705882370471954), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28627452254295349, 0.28627452254295349), (0.28991597890853882,
0.29019609093666077, 0.29019609093666077), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.3490196168422699, 0.3490196168422699), (0.35294118523597717,
0.35294118523597717, 0.35294118523597717), (0.3571428656578064,
0.35686275362968445, 0.35686275362968445), (0.36134454607963562,
0.36078432202339172, 0.36078432202339172), (0.36554622650146484,
0.364705890417099, 0.364705890417099), (0.36974790692329407,
0.36862745881080627, 0.36862745881080627), (0.37394958734512329,
0.37254902720451355, 0.37254902720451355), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.4117647111415863, 0.4117647111415863), (0.41596639156341553,
0.41568627953529358, 0.41568627953529358), (0.42016807198524475,
0.41960784792900085, 0.41960784792900085), (0.42436975240707397,
0.42352941632270813, 0.42352941632270813), (0.4285714328289032,
0.42745098471641541, 0.42745098471641541), (0.43277311325073242,
0.43137255311012268, 0.43137255311012268), (0.43697479367256165,
0.43529412150382996, 0.43529412150382996), (0.44117647409439087,
0.43921568989753723, 0.43921568989753723), (0.44537815451622009,
0.44313725829124451, 0.44313725829124451), (0.44957983493804932,
0.44705882668495178, 0.44705882668495178), (0.45378151535987854,
0.45098039507865906, 0.45098039507865906), (0.45798319578170776,
0.45490196347236633, 0.45490196347236633), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47450980544090271, 0.47450980544090271), (0.47899159789085388,
0.47843137383460999, 0.47843137383460999), (0.48319327831268311,
0.48235294222831726, 0.48235294222831726), (0.48739495873451233,
0.48627451062202454, 0.48627451062202454), (0.49159663915634155,
0.49019607901573181, 0.49019607901573181), (0.49579831957817078,
0.49411764740943909, 0.49411764740943909), (0.5, 0.49803921580314636,
0.49803921580314636), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.5372549295425415,
0.5372549295425415), (0.54201680421829224, 0.54117649793624878,
0.54117649793624878), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.60392159223556519,
0.60392159223556519), (0.60924369096755981, 0.60784316062927246,
0.60784316062927246), (0.61344540119171143, 0.61176472902297974,
0.61176472902297974), (0.61764705181121826, 0.61568629741668701,
0.61568629741668701), (0.62184876203536987, 0.61960786581039429,
0.61960786581039429), (0.62605041265487671, 0.62352943420410156,
0.62352943420410156), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.66274511814117432,
0.66274511814117432), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67058825492858887,
0.67058825492858887), (0.67647057771682739, 0.67450982332229614,
0.67450982332229614), (0.680672287940979, 0.67843139171600342,
0.67843139171600342), (0.68487393856048584, 0.68235296010971069,
0.68235296010971069), (0.68907564878463745, 0.68627452850341797,
0.68627452850341797), (0.69327729940414429, 0.69019609689712524,
0.69019609689712524), (0.6974790096282959, 0.69411766529083252,
0.69411766529083252), (0.70168066024780273, 0.69803923368453979,
0.69803923368453979), (0.70588237047195435, 0.70196080207824707,
0.70196080207824707), (0.71008402109146118, 0.70588237047195435,
0.70588237047195435), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72549021244049072,
0.72549021244049072), (0.73109245300292969, 0.729411780834198,
0.729411780834198), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73725491762161255,
0.73725491762161255), (0.74369746446609497, 0.74117648601531982,
0.74117648601531982), (0.74789917469024658, 0.7450980544090271,
0.7450980544090271), (0.75210082530975342, 0.74901962280273438,
0.74901962280273438), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78823530673980713,
0.78823530673980713), (0.79411762952804565, 0.7921568751335144,
0.7921568751335144), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.85098040103912354,
0.85098040103912354), (0.8571428656578064, 0.85490196943283081,
0.85490196943283081), (0.86134451627731323, 0.85882353782653809,
0.85882353782653809), (0.86554622650146484, 0.86274510622024536,
0.86274510622024536), (0.86974787712097168, 0.86666667461395264,
0.86666667461395264), (0.87394958734512329, 0.87058824300765991,
0.87058824300765991), (0.87815123796463013, 0.87450981140136719,
0.87450981140136719), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.91372549533843994,
0.91372549533843994), (0.92016804218292236, 0.91764706373214722,
0.91764706373214722), (0.92436975240707397, 0.92156863212585449,
0.92156863212585449), (0.92857140302658081, 0.92549020051956177,
0.92549020051956177), (0.93277311325073242, 0.92941176891326904,
0.92941176891326904), (0.93697476387023926, 0.93333333730697632,
0.93333333730697632), (0.94117647409439087, 0.93725490570068359,
0.93725490570068359), (0.94537812471389771, 0.94117647409439087,
0.94117647409439087), (0.94957983493804932, 0.94509804248809814,
0.94509804248809814), (0.95378148555755615, 0.94901961088180542,
0.94901961088180542), (0.95798319578170776, 0.9529411792755127,
0.9529411792755127), (0.9621848464012146, 0.95686274766921997,
0.95686274766921997), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97647058963775635,
0.97647058963775635), (0.98319327831268311, 0.98039215803146362,
0.98039215803146362), (0.98739492893218994, 0.9843137264251709,
0.9843137264251709), (0.99159663915634155, 0.98823529481887817,
0.98823529481887817), (0.99579828977584839, 0.99215686321258545,
0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.011764706112444401,
0.011764706112444401), (0.016806723549962044, 0.015686275437474251,
0.015686275437474251), (0.021008403971791267, 0.019607843831181526,
0.019607843831181526), (0.025210084393620491, 0.023529412224888802,
0.023529412224888802), (0.029411764815449715, 0.027450980618596077,
0.027450980618596077), (0.033613447099924088, 0.035294119268655777,
0.035294119268655777), (0.037815127521753311, 0.039215687662363052,
0.039215687662363052), (0.042016807943582535, 0.043137256056070328,
0.043137256056070328), (0.046218488365411758, 0.047058824449777603,
0.047058824449777603), (0.050420168787240982, 0.050980392843484879,
0.050980392843484879), (0.054621849209070206, 0.054901961237192154,
0.054901961237192154), (0.058823529630899429, 0.058823529630899429,
0.058823529630899429), (0.063025213778018951, 0.062745101749897003,
0.062745101749897003), (0.067226894199848175, 0.066666670143604279,
0.066666670143604279), (0.071428574621677399, 0.070588238537311554,
0.070588238537311554), (0.075630255043506622, 0.074509806931018829,
0.074509806931018829), (0.079831935465335846, 0.078431375324726105,
0.078431375324726105), (0.08403361588716507, 0.08235294371843338,
0.08235294371843338), (0.088235296308994293, 0.086274512112140656,
0.086274512112140656), (0.092436976730823517, 0.090196080505847931,
0.090196080505847931), (0.09663865715265274, 0.098039217293262482,
0.098039217293262482), (0.10084033757448196, 0.10196078568696976,
0.10196078568696976), (0.10504201799631119, 0.10588235408067703,
0.10588235408067703), (0.10924369841814041, 0.10980392247438431,
0.10980392247438431), (0.11344537883996964, 0.11372549086809158,
0.11372549086809158), (0.11764705926179886, 0.11764705926179886,
0.11764705926179886), (0.12184873968362808, 0.12156862765550613,
0.12156862765550613), (0.1260504275560379, 0.12549020349979401,
0.12549020349979401), (0.13025210797786713, 0.12941177189350128,
0.12941177189350128), (0.13445378839969635, 0.13333334028720856,
0.13333334028720856), (0.13865546882152557, 0.13725490868091583,
0.13725490868091583), (0.1428571492433548, 0.14117647707462311,
0.14117647707462311), (0.14705882966518402, 0.14509804546833038,
0.14509804546833038), (0.15126051008701324, 0.14901961386203766,
0.14901961386203766), (0.15546219050884247, 0.15294118225574493,
0.15294118225574493), (0.15966387093067169, 0.16078431904315948,
0.16078431904315948), (0.16386555135250092, 0.16470588743686676,
0.16470588743686676), (0.16806723177433014, 0.16862745583057404,
0.16862745583057404), (0.17226891219615936, 0.17254902422428131,
0.17254902422428131), (0.17647059261798859, 0.17647059261798859,
0.17647059261798859), (0.18067227303981781, 0.18039216101169586,
0.18039216101169586), (0.18487395346164703, 0.18431372940540314,
0.18431372940540314), (0.18907563388347626, 0.18823529779911041,
0.18823529779911041), (0.19327731430530548, 0.19215686619281769,
0.19215686619281769), (0.1974789947271347, 0.19607843458652496,
0.19607843458652496), (0.20168067514896393, 0.20000000298023224,
0.20000000298023224), (0.20588235557079315, 0.20392157137393951,
0.20392157137393951), (0.21008403599262238, 0.20784313976764679,
0.20784313976764679), (0.2142857164144516, 0.21176470816135406,
0.21176470816135406), (0.21848739683628082, 0.21568627655506134,
0.21568627655506134), (0.22268907725811005, 0.22352941334247589,
0.22352941334247589), (0.22689075767993927, 0.22745098173618317,
0.22745098173618317), (0.23109243810176849, 0.23137255012989044,
0.23137255012989044), (0.23529411852359772, 0.23529411852359772,
0.23529411852359772), (0.23949579894542694, 0.23921568691730499,
0.23921568691730499), (0.24369747936725616, 0.24313725531101227,
0.24313725531101227), (0.24789915978908539, 0.24705882370471954,
0.24705882370471954), (0.25210085511207581, 0.25098040699958801,
0.25098040699958801), (0.25630253553390503, 0.25490197539329529,
0.25490197539329529), (0.26050421595573425, 0.25882354378700256,
0.25882354378700256), (0.26470589637756348, 0.26274511218070984,
0.26274511218070984), (0.2689075767993927, 0.26666668057441711,
0.26666668057441711), (0.27310925722122192, 0.27058824896812439,
0.27058824896812439), (0.27731093764305115, 0.27450981736183167,
0.27450981736183167), (0.28151261806488037, 0.27843138575553894,
0.27843138575553894), (0.28571429848670959, 0.28627452254295349,
0.28627452254295349), (0.28991597890853882, 0.29019609093666077,
0.29019609093666077), (0.29411765933036804, 0.29411765933036804,
0.29411765933036804), (0.29831933975219727, 0.29803922772407532,
0.29803922772407532), (0.30252102017402649, 0.30196079611778259,
0.30196079611778259), (0.30672270059585571, 0.30588236451148987,
0.30588236451148987), (0.31092438101768494, 0.30980393290519714,
0.30980393290519714), (0.31512606143951416, 0.31372550129890442,
0.31372550129890442), (0.31932774186134338, 0.31764706969261169,
0.31764706969261169), (0.32352942228317261, 0.32156863808631897,
0.32156863808631897), (0.32773110270500183, 0.32549020648002625,
0.32549020648002625), (0.33193278312683105, 0.32941177487373352,
0.32941177487373352), (0.33613446354866028, 0.3333333432674408,
0.3333333432674408), (0.3403361439704895, 0.33725491166114807,
0.33725491166114807), (0.34453782439231873, 0.34117648005485535,
0.34117648005485535), (0.34873950481414795, 0.3490196168422699,
0.3490196168422699), (0.35294118523597717, 0.35294118523597717,
0.35294118523597717), (0.3571428656578064, 0.35686275362968445,
0.35686275362968445), (0.36134454607963562, 0.36078432202339172,
0.36078432202339172), (0.36554622650146484, 0.364705890417099,
0.364705890417099), (0.36974790692329407, 0.36862745881080627,
0.36862745881080627), (0.37394958734512329, 0.37254902720451355,
0.37254902720451355), (0.37815126776695251, 0.37647059559822083,
0.37647059559822083), (0.38235294818878174, 0.3803921639919281,
0.3803921639919281), (0.38655462861061096, 0.38431373238563538,
0.38431373238563538), (0.39075630903244019, 0.38823530077934265,
0.38823530077934265), (0.39495798945426941, 0.39215686917304993,
0.39215686917304993), (0.39915966987609863, 0.3960784375667572,
0.3960784375667572), (0.40336135029792786, 0.40000000596046448,
0.40000000596046448), (0.40756303071975708, 0.40392157435417175,
0.40392157435417175), (0.4117647111415863, 0.4117647111415863,
0.4117647111415863), (0.41596639156341553, 0.41568627953529358,
0.41568627953529358), (0.42016807198524475, 0.41960784792900085,
0.41960784792900085), (0.42436975240707397, 0.42352941632270813,
0.42352941632270813), (0.4285714328289032, 0.42745098471641541,
0.42745098471641541), (0.43277311325073242, 0.43137255311012268,
0.43137255311012268), (0.43697479367256165, 0.43529412150382996,
0.43529412150382996), (0.44117647409439087, 0.43921568989753723,
0.43921568989753723), (0.44537815451622009, 0.44313725829124451,
0.44313725829124451), (0.44957983493804932, 0.44705882668495178,
0.44705882668495178), (0.45378151535987854, 0.45098039507865906,
0.45098039507865906), (0.45798319578170776, 0.45490196347236633,
0.45490196347236633), (0.46218487620353699, 0.45882353186607361,
0.45882353186607361), (0.46638655662536621, 0.46274510025978088,
0.46274510025978088), (0.47058823704719543, 0.46666666865348816,
0.46666666865348816), (0.47478991746902466, 0.47450980544090271,
0.47450980544090271), (0.47899159789085388, 0.47843137383460999,
0.47843137383460999), (0.48319327831268311, 0.48235294222831726,
0.48235294222831726), (0.48739495873451233, 0.48627451062202454,
0.48627451062202454), (0.49159663915634155, 0.49019607901573181,
0.49019607901573181), (0.49579831957817078, 0.49411764740943909,
0.49411764740943909), (0.5, 0.49803921580314636, 0.49803921580314636),
(0.50420171022415161, 0.50196081399917603, 0.50196081399917603),
(0.50840336084365845, 0.5058823823928833, 0.5058823823928833),
(0.51260507106781006, 0.50980395078659058, 0.50980395078659058),
(0.51680672168731689, 0.51372551918029785, 0.51372551918029785),
(0.52100843191146851, 0.51764708757400513, 0.51764708757400513),
(0.52521008253097534, 0.5215686559677124, 0.5215686559677124),
(0.52941179275512695, 0.52549022436141968, 0.52549022436141968),
(0.53361344337463379, 0.52941179275512695, 0.52941179275512695),
(0.5378151535987854, 0.5372549295425415, 0.5372549295425415),
(0.54201680421829224, 0.54117649793624878, 0.54117649793624878),
(0.54621851444244385, 0.54509806632995605, 0.54509806632995605),
(0.55042016506195068, 0.54901963472366333, 0.54901963472366333),
(0.55462187528610229, 0.55294120311737061, 0.55294120311737061),
(0.55882352590560913, 0.55686277151107788, 0.55686277151107788),
(0.56302523612976074, 0.56078433990478516, 0.56078433990478516),
(0.56722688674926758, 0.56470590829849243, 0.56470590829849243),
(0.57142859697341919, 0.56862747669219971, 0.56862747669219971),
(0.57563024759292603, 0.57254904508590698, 0.57254904508590698),
(0.57983195781707764, 0.57647061347961426, 0.57647061347961426),
(0.58403360843658447, 0.58039218187332153, 0.58039218187332153),
(0.58823531866073608, 0.58431375026702881, 0.58431375026702881),
(0.59243696928024292, 0.58823531866073608, 0.58823531866073608),
(0.59663867950439453, 0.59215688705444336, 0.59215688705444336),
(0.60084033012390137, 0.60000002384185791, 0.60000002384185791),
(0.60504204034805298, 0.60392159223556519, 0.60392159223556519),
(0.60924369096755981, 0.60784316062927246, 0.60784316062927246),
(0.61344540119171143, 0.61176472902297974, 0.61176472902297974),
(0.61764705181121826, 0.61568629741668701, 0.61568629741668701),
(0.62184876203536987, 0.61960786581039429, 0.61960786581039429),
(0.62605041265487671, 0.62352943420410156, 0.62352943420410156),
(0.63025212287902832, 0.62745100259780884, 0.62745100259780884),
(0.63445377349853516, 0.63137257099151611, 0.63137257099151611),
(0.63865548372268677, 0.63529413938522339, 0.63529413938522339),
(0.6428571343421936, 0.63921570777893066, 0.63921570777893066),
(0.64705884456634521, 0.64313727617263794, 0.64313727617263794),
(0.65126049518585205, 0.64705884456634521, 0.64705884456634521),
(0.65546220541000366, 0.65098041296005249, 0.65098041296005249),
(0.6596638560295105, 0.65490198135375977, 0.65490198135375977),
(0.66386556625366211, 0.66274511814117432, 0.66274511814117432),
(0.66806721687316895, 0.66666668653488159, 0.66666668653488159),
(0.67226892709732056, 0.67058825492858887, 0.67058825492858887),
(0.67647057771682739, 0.67450982332229614, 0.67450982332229614),
(0.680672287940979, 0.67843139171600342, 0.67843139171600342),
(0.68487393856048584, 0.68235296010971069, 0.68235296010971069),
(0.68907564878463745, 0.68627452850341797, 0.68627452850341797),
(0.69327729940414429, 0.69019609689712524, 0.69019609689712524),
(0.6974790096282959, 0.69411766529083252, 0.69411766529083252),
(0.70168066024780273, 0.69803923368453979, 0.69803923368453979),
(0.70588237047195435, 0.70196080207824707, 0.70196080207824707),
(0.71008402109146118, 0.70588237047195435, 0.70588237047195435),
(0.71428573131561279, 0.70980393886566162, 0.70980393886566162),
(0.71848738193511963, 0.7137255072593689, 0.7137255072593689),
(0.72268909215927124, 0.71764707565307617, 0.71764707565307617),
(0.72689074277877808, 0.72549021244049072, 0.72549021244049072),
(0.73109245300292969, 0.729411780834198, 0.729411780834198),
(0.73529410362243652, 0.73333334922790527, 0.73333334922790527),
(0.73949581384658813, 0.73725491762161255, 0.73725491762161255),
(0.74369746446609497, 0.74117648601531982, 0.74117648601531982),
(0.74789917469024658, 0.7450980544090271, 0.7450980544090271),
(0.75210082530975342, 0.74901962280273438, 0.74901962280273438),
(0.75630253553390503, 0.75294119119644165, 0.75294119119644165),
(0.76050418615341187, 0.75686275959014893, 0.75686275959014893),
(0.76470589637756348, 0.7607843279838562, 0.7607843279838562),
(0.76890754699707031, 0.76470589637756348, 0.76470589637756348),
(0.77310925722122192, 0.76862746477127075, 0.76862746477127075),
(0.77731090784072876, 0.77254903316497803, 0.77254903316497803),
(0.78151261806488037, 0.7764706015586853, 0.7764706015586853),
(0.78571426868438721, 0.78039216995239258, 0.78039216995239258),
(0.78991597890853882, 0.78823530673980713, 0.78823530673980713),
(0.79411762952804565, 0.7921568751335144, 0.7921568751335144),
(0.79831933975219727, 0.79607844352722168, 0.79607844352722168),
(0.8025209903717041, 0.80000001192092896, 0.80000001192092896),
(0.80672270059585571, 0.80392158031463623, 0.80392158031463623),
(0.81092435121536255, 0.80784314870834351, 0.80784314870834351),
(0.81512606143951416, 0.81176471710205078, 0.81176471710205078),
(0.819327712059021, 0.81568628549575806, 0.81568628549575806),
(0.82352942228317261, 0.81960785388946533, 0.81960785388946533),
(0.82773107290267944, 0.82352942228317261, 0.82352942228317261),
(0.83193278312683105, 0.82745099067687988, 0.82745099067687988),
(0.83613443374633789, 0.83137255907058716, 0.83137255907058716),
(0.8403361439704895, 0.83529412746429443, 0.83529412746429443),
(0.84453779458999634, 0.83921569585800171, 0.83921569585800171),
(0.84873950481414795, 0.84313726425170898, 0.84313726425170898),
(0.85294115543365479, 0.85098040103912354, 0.85098040103912354),
(0.8571428656578064, 0.85490196943283081, 0.85490196943283081),
(0.86134451627731323, 0.85882353782653809, 0.85882353782653809),
(0.86554622650146484, 0.86274510622024536, 0.86274510622024536),
(0.86974787712097168, 0.86666667461395264, 0.86666667461395264),
(0.87394958734512329, 0.87058824300765991, 0.87058824300765991),
(0.87815123796463013, 0.87450981140136719, 0.87450981140136719),
(0.88235294818878174, 0.87843137979507446, 0.87843137979507446),
(0.88655459880828857, 0.88235294818878174, 0.88235294818878174),
(0.89075630903244019, 0.88627451658248901, 0.88627451658248901),
(0.89495795965194702, 0.89019608497619629, 0.89019608497619629),
(0.89915966987609863, 0.89411765336990356, 0.89411765336990356),
(0.90336132049560547, 0.89803922176361084, 0.89803922176361084),
(0.90756303071975708, 0.90196079015731812, 0.90196079015731812),
(0.91176468133926392, 0.90588235855102539, 0.90588235855102539),
(0.91596639156341553, 0.91372549533843994, 0.91372549533843994),
(0.92016804218292236, 0.91764706373214722, 0.91764706373214722),
(0.92436975240707397, 0.92156863212585449, 0.92156863212585449),
(0.92857140302658081, 0.92549020051956177, 0.92549020051956177),
(0.93277311325073242, 0.92941176891326904, 0.92941176891326904),
(0.93697476387023926, 0.93333333730697632, 0.93333333730697632),
(0.94117647409439087, 0.93725490570068359, 0.93725490570068359),
(0.94537812471389771, 0.94117647409439087, 0.94117647409439087),
(0.94957983493804932, 0.94509804248809814, 0.94509804248809814),
(0.95378148555755615, 0.94901961088180542, 0.94901961088180542),
(0.95798319578170776, 0.9529411792755127, 0.9529411792755127),
(0.9621848464012146, 0.95686274766921997, 0.95686274766921997),
(0.96638655662536621, 0.96078431606292725, 0.96078431606292725),
(0.97058820724487305, 0.96470588445663452, 0.96470588445663452),
(0.97478991746902466, 0.9686274528503418, 0.9686274528503418),
(0.97899156808853149, 0.97647058963775635, 0.97647058963775635),
(0.98319327831268311, 0.98039215803146362, 0.98039215803146362),
(0.98739492893218994, 0.9843137264251709, 0.9843137264251709),
(0.99159663915634155, 0.98823529481887817, 0.98823529481887817),
(0.99579828977584839, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)], 'red': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.035294119268655777, 0.035294119268655777),
(0.037815127521753311, 0.039215687662363052, 0.039215687662363052),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.098039217293262482, 0.098039217293262482),
(0.10084033757448196, 0.10196078568696976, 0.10196078568696976),
(0.10504201799631119, 0.10588235408067703, 0.10588235408067703),
(0.10924369841814041, 0.10980392247438431, 0.10980392247438431),
(0.11344537883996964, 0.11372549086809158, 0.11372549086809158),
(0.11764705926179886, 0.11764705926179886, 0.11764705926179886),
(0.12184873968362808, 0.12156862765550613, 0.12156862765550613),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.16078431904315948, 0.16078431904315948),
(0.16386555135250092, 0.16470588743686676, 0.16470588743686676),
(0.16806723177433014, 0.16862745583057404, 0.16862745583057404),
(0.17226891219615936, 0.17254902422428131, 0.17254902422428131),
(0.17647059261798859, 0.17647059261798859, 0.17647059261798859),
(0.18067227303981781, 0.18039216101169586, 0.18039216101169586),
(0.18487395346164703, 0.18431372940540314, 0.18431372940540314),
(0.18907563388347626, 0.18823529779911041, 0.18823529779911041),
(0.19327731430530548, 0.19215686619281769, 0.19215686619281769),
(0.1974789947271347, 0.19607843458652496, 0.19607843458652496),
(0.20168067514896393, 0.20000000298023224, 0.20000000298023224),
(0.20588235557079315, 0.20392157137393951, 0.20392157137393951),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.22352941334247589, 0.22352941334247589),
(0.22689075767993927, 0.22745098173618317, 0.22745098173618317),
(0.23109243810176849, 0.23137255012989044, 0.23137255012989044),
(0.23529411852359772, 0.23529411852359772, 0.23529411852359772),
(0.23949579894542694, 0.23921568691730499, 0.23921568691730499),
(0.24369747936725616, 0.24313725531101227, 0.24313725531101227),
(0.24789915978908539, 0.24705882370471954, 0.24705882370471954),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28627452254295349, 0.28627452254295349),
(0.28991597890853882, 0.29019609093666077, 0.29019609093666077),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.3490196168422699, 0.3490196168422699),
(0.35294118523597717, 0.35294118523597717, 0.35294118523597717),
(0.3571428656578064, 0.35686275362968445, 0.35686275362968445),
(0.36134454607963562, 0.36078432202339172, 0.36078432202339172),
(0.36554622650146484, 0.364705890417099, 0.364705890417099),
(0.36974790692329407, 0.36862745881080627, 0.36862745881080627),
(0.37394958734512329, 0.37254902720451355, 0.37254902720451355),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.4117647111415863, 0.4117647111415863),
(0.41596639156341553, 0.41568627953529358, 0.41568627953529358),
(0.42016807198524475, 0.41960784792900085, 0.41960784792900085),
(0.42436975240707397, 0.42352941632270813, 0.42352941632270813),
(0.4285714328289032, 0.42745098471641541, 0.42745098471641541),
(0.43277311325073242, 0.43137255311012268, 0.43137255311012268),
(0.43697479367256165, 0.43529412150382996, 0.43529412150382996),
(0.44117647409439087, 0.43921568989753723, 0.43921568989753723),
(0.44537815451622009, 0.44313725829124451, 0.44313725829124451),
(0.44957983493804932, 0.44705882668495178, 0.44705882668495178),
(0.45378151535987854, 0.45098039507865906, 0.45098039507865906),
(0.45798319578170776, 0.45490196347236633, 0.45490196347236633),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47450980544090271, 0.47450980544090271),
(0.47899159789085388, 0.47843137383460999, 0.47843137383460999),
(0.48319327831268311, 0.48235294222831726, 0.48235294222831726),
(0.48739495873451233, 0.48627451062202454, 0.48627451062202454),
(0.49159663915634155, 0.49019607901573181, 0.49019607901573181),
(0.49579831957817078, 0.49411764740943909, 0.49411764740943909), (0.5,
0.49803921580314636, 0.49803921580314636), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.5372549295425415, 0.5372549295425415), (0.54201680421829224,
0.54117649793624878, 0.54117649793624878), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.60000002384185791, 0.60000002384185791), (0.60504204034805298,
0.60392159223556519, 0.60392159223556519), (0.60924369096755981,
0.60784316062927246, 0.60784316062927246), (0.61344540119171143,
0.61176472902297974, 0.61176472902297974), (0.61764705181121826,
0.61568629741668701, 0.61568629741668701), (0.62184876203536987,
0.61960786581039429, 0.61960786581039429), (0.62605041265487671,
0.62352943420410156, 0.62352943420410156), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.66274511814117432, 0.66274511814117432), (0.66806721687316895,
0.66666668653488159, 0.66666668653488159), (0.67226892709732056,
0.67058825492858887, 0.67058825492858887), (0.67647057771682739,
0.67450982332229614, 0.67450982332229614), (0.680672287940979,
0.67843139171600342, 0.67843139171600342), (0.68487393856048584,
0.68235296010971069, 0.68235296010971069), (0.68907564878463745,
0.68627452850341797, 0.68627452850341797), (0.69327729940414429,
0.69019609689712524, 0.69019609689712524), (0.6974790096282959,
0.69411766529083252, 0.69411766529083252), (0.70168066024780273,
0.69803923368453979, 0.69803923368453979), (0.70588237047195435,
0.70196080207824707, 0.70196080207824707), (0.71008402109146118,
0.70588237047195435, 0.70588237047195435), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72549021244049072, 0.72549021244049072), (0.73109245300292969,
0.729411780834198, 0.729411780834198), (0.73529410362243652,
0.73333334922790527, 0.73333334922790527), (0.73949581384658813,
0.73725491762161255, 0.73725491762161255), (0.74369746446609497,
0.74117648601531982, 0.74117648601531982), (0.74789917469024658,
0.7450980544090271, 0.7450980544090271), (0.75210082530975342,
0.74901962280273438, 0.74901962280273438), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78823530673980713, 0.78823530673980713), (0.79411762952804565,
0.7921568751335144, 0.7921568751335144), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.85098040103912354, 0.85098040103912354), (0.8571428656578064,
0.85490196943283081, 0.85490196943283081), (0.86134451627731323,
0.85882353782653809, 0.85882353782653809), (0.86554622650146484,
0.86274510622024536, 0.86274510622024536), (0.86974787712097168,
0.86666667461395264, 0.86666667461395264), (0.87394958734512329,
0.87058824300765991, 0.87058824300765991), (0.87815123796463013,
0.87450981140136719, 0.87450981140136719), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.91372549533843994, 0.91372549533843994), (0.92016804218292236,
0.91764706373214722, 0.91764706373214722), (0.92436975240707397,
0.92156863212585449, 0.92156863212585449), (0.92857140302658081,
0.92549020051956177, 0.92549020051956177), (0.93277311325073242,
0.92941176891326904, 0.92941176891326904), (0.93697476387023926,
0.93333333730697632, 0.93333333730697632), (0.94117647409439087,
0.93725490570068359, 0.93725490570068359), (0.94537812471389771,
0.94117647409439087, 0.94117647409439087), (0.94957983493804932,
0.94509804248809814, 0.94509804248809814), (0.95378148555755615,
0.94901961088180542, 0.94901961088180542), (0.95798319578170776,
0.9529411792755127, 0.9529411792755127), (0.9621848464012146,
0.95686274766921997, 0.95686274766921997), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97647058963775635, 0.97647058963775635), (0.98319327831268311,
0.98039215803146362, 0.98039215803146362), (0.98739492893218994,
0.9843137264251709, 0.9843137264251709), (0.99159663915634155,
0.98823529481887817, 0.98823529481887817), (0.99579828977584839,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)]}
_gist_heat_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388, 0.0, 0.0),
(0.48319327831268311, 0.0, 0.0), (0.48739495873451233, 0.0, 0.0),
(0.49159663915634155, 0.0, 0.0), (0.49579831957817078, 0.0, 0.0), (0.5,
0.0, 0.0), (0.50420171022415161, 0.0, 0.0), (0.50840336084365845, 0.0,
0.0), (0.51260507106781006, 0.0, 0.0), (0.51680672168731689, 0.0, 0.0),
(0.52100843191146851, 0.0, 0.0), (0.52521008253097534, 0.0, 0.0),
(0.52941179275512695, 0.0, 0.0), (0.53361344337463379, 0.0, 0.0),
(0.5378151535987854, 0.0, 0.0), (0.54201680421829224, 0.0, 0.0),
(0.54621851444244385, 0.0, 0.0), (0.55042016506195068, 0.0, 0.0),
(0.55462187528610229, 0.0, 0.0), (0.55882352590560913, 0.0, 0.0),
(0.56302523612976074, 0.0, 0.0), (0.56722688674926758, 0.0, 0.0),
(0.57142859697341919, 0.0, 0.0), (0.57563024759292603, 0.0, 0.0),
(0.57983195781707764, 0.0, 0.0), (0.58403360843658447, 0.0, 0.0),
(0.58823531866073608, 0.0, 0.0), (0.59243696928024292, 0.0, 0.0),
(0.59663867950439453, 0.0, 0.0), (0.60084033012390137, 0.0, 0.0),
(0.60504204034805298, 0.0, 0.0), (0.60924369096755981, 0.0, 0.0),
(0.61344540119171143, 0.0, 0.0), (0.61764705181121826, 0.0, 0.0),
(0.62184876203536987, 0.0, 0.0), (0.62605041265487671, 0.0, 0.0),
(0.63025212287902832, 0.0, 0.0), (0.63445377349853516, 0.0, 0.0),
(0.63865548372268677, 0.0, 0.0), (0.6428571343421936, 0.0, 0.0),
(0.64705884456634521, 0.0, 0.0), (0.65126049518585205, 0.0, 0.0),
(0.65546220541000366, 0.0, 0.0), (0.6596638560295105, 0.0, 0.0),
(0.66386556625366211, 0.0, 0.0), (0.66806721687316895, 0.0, 0.0),
(0.67226892709732056, 0.0, 0.0), (0.67647057771682739, 0.0, 0.0),
(0.680672287940979, 0.0, 0.0), (0.68487393856048584, 0.0, 0.0),
(0.68907564878463745, 0.0, 0.0), (0.69327729940414429, 0.0, 0.0),
(0.6974790096282959, 0.0, 0.0), (0.70168066024780273, 0.0, 0.0),
(0.70588237047195435, 0.0, 0.0), (0.71008402109146118, 0.0, 0.0),
(0.71428573131561279, 0.0, 0.0), (0.71848738193511963, 0.0, 0.0),
(0.72268909215927124, 0.0, 0.0), (0.72689074277877808, 0.0, 0.0),
(0.73109245300292969, 0.0, 0.0), (0.73529410362243652, 0.0, 0.0),
(0.73949581384658813, 0.0, 0.0), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.0, 0.0), (0.75210082530975342, 0.0, 0.0),
(0.75630253553390503, 0.027450980618596077, 0.027450980618596077),
(0.76050418615341187, 0.043137256056070328, 0.043137256056070328),
(0.76470589637756348, 0.058823529630899429, 0.058823529630899429),
(0.76890754699707031, 0.074509806931018829, 0.074509806931018829),
(0.77310925722122192, 0.090196080505847931, 0.090196080505847931),
(0.77731090784072876, 0.10588235408067703, 0.10588235408067703),
(0.78151261806488037, 0.12156862765550613, 0.12156862765550613),
(0.78571426868438721, 0.13725490868091583, 0.13725490868091583),
(0.78991597890853882, 0.15294118225574493, 0.15294118225574493),
(0.79411762952804565, 0.16862745583057404, 0.16862745583057404),
(0.79831933975219727, 0.20000000298023224, 0.20000000298023224),
(0.8025209903717041, 0.21176470816135406, 0.21176470816135406),
(0.80672270059585571, 0.22745098173618317, 0.22745098173618317),
(0.81092435121536255, 0.24313725531101227, 0.24313725531101227),
(0.81512606143951416, 0.25882354378700256, 0.25882354378700256),
(0.819327712059021, 0.27450981736183167, 0.27450981736183167),
(0.82352942228317261, 0.29019609093666077, 0.29019609093666077),
(0.82773107290267944, 0.30588236451148987, 0.30588236451148987),
(0.83193278312683105, 0.32156863808631897, 0.32156863808631897),
(0.83613443374633789, 0.33725491166114807, 0.33725491166114807),
(0.8403361439704895, 0.35294118523597717, 0.35294118523597717),
(0.84453779458999634, 0.36862745881080627, 0.36862745881080627),
(0.84873950481414795, 0.38431373238563538, 0.38431373238563538),
(0.85294115543365479, 0.40000000596046448, 0.40000000596046448),
(0.8571428656578064, 0.4117647111415863, 0.4117647111415863),
(0.86134451627731323, 0.42745098471641541, 0.42745098471641541),
(0.86554622650146484, 0.44313725829124451, 0.44313725829124451),
(0.86974787712097168, 0.45882353186607361, 0.45882353186607361),
(0.87394958734512329, 0.47450980544090271, 0.47450980544090271),
(0.87815123796463013, 0.49019607901573181, 0.49019607901573181),
(0.88235294818878174, 0.5215686559677124, 0.5215686559677124),
(0.88655459880828857, 0.5372549295425415, 0.5372549295425415),
(0.89075630903244019, 0.55294120311737061, 0.55294120311737061),
(0.89495795965194702, 0.56862747669219971, 0.56862747669219971),
(0.89915966987609863, 0.58431375026702881, 0.58431375026702881),
(0.90336132049560547, 0.60000002384185791, 0.60000002384185791),
(0.90756303071975708, 0.61176472902297974, 0.61176472902297974),
(0.91176468133926392, 0.62745100259780884, 0.62745100259780884),
(0.91596639156341553, 0.64313727617263794, 0.64313727617263794),
(0.92016804218292236, 0.65882354974746704, 0.65882354974746704),
(0.92436975240707397, 0.67450982332229614, 0.67450982332229614),
(0.92857140302658081, 0.69019609689712524, 0.69019609689712524),
(0.93277311325073242, 0.70588237047195435, 0.70588237047195435),
(0.93697476387023926, 0.72156864404678345, 0.72156864404678345),
(0.94117647409439087, 0.73725491762161255, 0.73725491762161255),
(0.94537812471389771, 0.75294119119644165, 0.75294119119644165),
(0.94957983493804932, 0.76862746477127075, 0.76862746477127075),
(0.95378148555755615, 0.78431373834609985, 0.78431373834609985),
(0.95798319578170776, 0.80000001192092896, 0.80000001192092896),
(0.9621848464012146, 0.81176471710205078, 0.81176471710205078),
(0.96638655662536621, 0.84313726425170898, 0.84313726425170898),
(0.97058820724487305, 0.85882353782653809, 0.85882353782653809),
(0.97478991746902466, 0.87450981140136719, 0.87450981140136719),
(0.97899156808853149, 0.89019608497619629, 0.89019608497619629),
(0.98319327831268311, 0.90588235855102539, 0.90588235855102539),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388,
0.0039215688593685627, 0.0039215688593685627), (0.48319327831268311,
0.011764706112444401, 0.011764706112444401), (0.48739495873451233,
0.019607843831181526, 0.019607843831181526), (0.49159663915634155,
0.027450980618596077, 0.027450980618596077), (0.49579831957817078,
0.035294119268655777, 0.035294119268655777), (0.5, 0.043137256056070328,
0.043137256056070328), (0.50420171022415161, 0.058823529630899429,
0.058823529630899429), (0.50840336084365845, 0.066666670143604279,
0.066666670143604279), (0.51260507106781006, 0.070588238537311554,
0.070588238537311554), (0.51680672168731689, 0.078431375324726105,
0.078431375324726105), (0.52100843191146851, 0.086274512112140656,
0.086274512112140656), (0.52521008253097534, 0.094117648899555206,
0.094117648899555206), (0.52941179275512695, 0.10196078568696976,
0.10196078568696976), (0.53361344337463379, 0.10980392247438431,
0.10980392247438431), (0.5378151535987854, 0.11764705926179886,
0.11764705926179886), (0.54201680421829224, 0.12549020349979401,
0.12549020349979401), (0.54621851444244385, 0.13725490868091583,
0.13725490868091583), (0.55042016506195068, 0.14509804546833038,
0.14509804546833038), (0.55462187528610229, 0.15294118225574493,
0.15294118225574493), (0.55882352590560913, 0.16078431904315948,
0.16078431904315948), (0.56302523612976074, 0.16862745583057404,
0.16862745583057404), (0.56722688674926758, 0.17647059261798859,
0.17647059261798859), (0.57142859697341919, 0.18431372940540314,
0.18431372940540314), (0.57563024759292603, 0.19215686619281769,
0.19215686619281769), (0.57983195781707764, 0.20000000298023224,
0.20000000298023224), (0.58403360843658447, 0.20392157137393951,
0.20392157137393951), (0.58823531866073608, 0.21176470816135406,
0.21176470816135406), (0.59243696928024292, 0.21960784494876862,
0.21960784494876862), (0.59663867950439453, 0.22745098173618317,
0.22745098173618317), (0.60084033012390137, 0.23529411852359772,
0.23529411852359772), (0.60504204034805298, 0.24313725531101227,
0.24313725531101227), (0.60924369096755981, 0.25098040699958801,
0.25098040699958801), (0.61344540119171143, 0.25882354378700256,
0.25882354378700256), (0.61764705181121826, 0.26666668057441711,
0.26666668057441711), (0.62184876203536987, 0.27058824896812439,
0.27058824896812439), (0.62605041265487671, 0.27843138575553894,
0.27843138575553894), (0.63025212287902832, 0.29411765933036804,
0.29411765933036804), (0.63445377349853516, 0.30196079611778259,
0.30196079611778259), (0.63865548372268677, 0.30980393290519714,
0.30980393290519714), (0.6428571343421936, 0.31764706969261169,
0.31764706969261169), (0.64705884456634521, 0.32549020648002625,
0.32549020648002625), (0.65126049518585205, 0.3333333432674408,
0.3333333432674408), (0.65546220541000366, 0.33725491166114807,
0.33725491166114807), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.35294118523597717,
0.35294118523597717), (0.66806721687316895, 0.36078432202339172,
0.36078432202339172), (0.67226892709732056, 0.36862745881080627,
0.36862745881080627), (0.67647057771682739, 0.37647059559822083,
0.37647059559822083), (0.680672287940979, 0.38431373238563538,
0.38431373238563538), (0.68487393856048584, 0.39215686917304993,
0.39215686917304993), (0.68907564878463745, 0.40000000596046448,
0.40000000596046448), (0.69327729940414429, 0.40392157435417175,
0.40392157435417175), (0.6974790096282959, 0.4117647111415863,
0.4117647111415863), (0.70168066024780273, 0.41960784792900085,
0.41960784792900085), (0.70588237047195435, 0.42745098471641541,
0.42745098471641541), (0.71008402109146118, 0.43529412150382996,
0.43529412150382996), (0.71428573131561279, 0.45098039507865906,
0.45098039507865906), (0.71848738193511963, 0.45882353186607361,
0.45882353186607361), (0.72268909215927124, 0.46666666865348816,
0.46666666865348816), (0.72689074277877808, 0.47058823704719543,
0.47058823704719543), (0.73109245300292969, 0.47843137383460999,
0.47843137383460999), (0.73529410362243652, 0.48627451062202454,
0.48627451062202454), (0.73949581384658813, 0.49411764740943909,
0.49411764740943909), (0.74369746446609497, 0.50196081399917603,
0.50196081399917603), (0.74789917469024658, 0.50980395078659058,
0.50980395078659058), (0.75210082530975342, 0.51764708757400513,
0.51764708757400513), (0.75630253553390503, 0.53333336114883423,
0.53333336114883423), (0.76050418615341187, 0.5372549295425415,
0.5372549295425415), (0.76470589637756348, 0.54509806632995605,
0.54509806632995605), (0.76890754699707031, 0.55294120311737061,
0.55294120311737061), (0.77310925722122192, 0.56078433990478516,
0.56078433990478516), (0.77731090784072876, 0.56862747669219971,
0.56862747669219971), (0.78151261806488037, 0.57647061347961426,
0.57647061347961426), (0.78571426868438721, 0.58431375026702881,
0.58431375026702881), (0.78991597890853882, 0.59215688705444336,
0.59215688705444336), (0.79411762952804565, 0.60000002384185791,
0.60000002384185791), (0.79831933975219727, 0.61176472902297974,
0.61176472902297974), (0.8025209903717041, 0.61960786581039429,
0.61960786581039429), (0.80672270059585571, 0.62745100259780884,
0.62745100259780884), (0.81092435121536255, 0.63529413938522339,
0.63529413938522339), (0.81512606143951416, 0.64313727617263794,
0.64313727617263794), (0.819327712059021, 0.65098041296005249,
0.65098041296005249), (0.82352942228317261, 0.65882354974746704,
0.65882354974746704), (0.82773107290267944, 0.66666668653488159,
0.66666668653488159), (0.83193278312683105, 0.67058825492858887,
0.67058825492858887), (0.83613443374633789, 0.67843139171600342,
0.67843139171600342), (0.8403361439704895, 0.68627452850341797,
0.68627452850341797), (0.84453779458999634, 0.69411766529083252,
0.69411766529083252), (0.84873950481414795, 0.70196080207824707,
0.70196080207824707), (0.85294115543365479, 0.70980393886566162,
0.70980393886566162), (0.8571428656578064, 0.71764707565307617,
0.71764707565307617), (0.86134451627731323, 0.72549021244049072,
0.72549021244049072), (0.86554622650146484, 0.73333334922790527,
0.73333334922790527), (0.86974787712097168, 0.73725491762161255,
0.73725491762161255), (0.87394958734512329, 0.7450980544090271,
0.7450980544090271), (0.87815123796463013, 0.75294119119644165,
0.75294119119644165), (0.88235294818878174, 0.76862746477127075,
0.76862746477127075), (0.88655459880828857, 0.7764706015586853,
0.7764706015586853), (0.89075630903244019, 0.78431373834609985,
0.78431373834609985), (0.89495795965194702, 0.7921568751335144,
0.7921568751335144), (0.89915966987609863, 0.80000001192092896,
0.80000001192092896), (0.90336132049560547, 0.80392158031463623,
0.80392158031463623), (0.90756303071975708, 0.81176471710205078,
0.81176471710205078), (0.91176468133926392, 0.81960785388946533,
0.81960785388946533), (0.91596639156341553, 0.82745099067687988,
0.82745099067687988), (0.92016804218292236, 0.83529412746429443,
0.83529412746429443), (0.92436975240707397, 0.84313726425170898,
0.84313726425170898), (0.92857140302658081, 0.85098040103912354,
0.85098040103912354), (0.93277311325073242, 0.85882353782653809,
0.85882353782653809), (0.93697476387023926, 0.86666667461395264,
0.86666667461395264), (0.94117647409439087, 0.87058824300765991,
0.87058824300765991), (0.94537812471389771, 0.87843137979507446,
0.87843137979507446), (0.94957983493804932, 0.88627451658248901,
0.88627451658248901), (0.95378148555755615, 0.89411765336990356,
0.89411765336990356), (0.95798319578170776, 0.90196079015731812,
0.90196079015731812), (0.9621848464012146, 0.90980392694473267,
0.90980392694473267), (0.96638655662536621, 0.92549020051956177,
0.92549020051956177), (0.97058820724487305, 0.93333333730697632,
0.93333333730697632), (0.97478991746902466, 0.93725490570068359,
0.93725490570068359), (0.97899156808853149, 0.94509804248809814,
0.94509804248809814), (0.98319327831268311, 0.9529411792755127,
0.9529411792755127), (0.98739492893218994, 0.96078431606292725,
0.96078431606292725), (0.99159663915634155, 0.9686274528503418,
0.9686274528503418), (0.99579828977584839, 0.97647058963775635,
0.97647058963775635), (1.0, 0.9843137264251709, 0.9843137264251709)],
'red': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.015686275437474251,
0.015686275437474251), (0.016806723549962044, 0.019607843831181526,
0.019607843831181526), (0.021008403971791267, 0.027450980618596077,
0.027450980618596077), (0.025210084393620491, 0.031372550874948502,
0.031372550874948502), (0.029411764815449715, 0.039215687662363052,
0.039215687662363052), (0.033613447099924088, 0.043137256056070328,
0.043137256056070328), (0.037815127521753311, 0.050980392843484879,
0.050980392843484879), (0.042016807943582535, 0.058823529630899429,
0.058823529630899429), (0.046218488365411758, 0.066666670143604279,
0.066666670143604279), (0.050420168787240982, 0.070588238537311554,
0.070588238537311554), (0.054621849209070206, 0.078431375324726105,
0.078431375324726105), (0.058823529630899429, 0.08235294371843338,
0.08235294371843338), (0.063025213778018951, 0.090196080505847931,
0.090196080505847931), (0.067226894199848175, 0.094117648899555206,
0.094117648899555206), (0.071428574621677399, 0.10196078568696976,
0.10196078568696976), (0.075630255043506622, 0.10588235408067703,
0.10588235408067703), (0.079831935465335846, 0.10980392247438431,
0.10980392247438431), (0.08403361588716507, 0.11764705926179886,
0.11764705926179886), (0.088235296308994293, 0.12156862765550613,
0.12156862765550613), (0.092436976730823517, 0.12941177189350128,
0.12941177189350128), (0.09663865715265274, 0.13333334028720856,
0.13333334028720856), (0.10084033757448196, 0.14117647707462311,
0.14117647707462311), (0.10504201799631119, 0.14509804546833038,
0.14509804546833038), (0.10924369841814041, 0.15294118225574493,
0.15294118225574493), (0.11344537883996964, 0.15686275064945221,
0.15686275064945221), (0.11764705926179886, 0.16470588743686676,
0.16470588743686676), (0.12184873968362808, 0.16862745583057404,
0.16862745583057404), (0.1260504275560379, 0.18039216101169586,
0.18039216101169586), (0.13025210797786713, 0.18431372940540314,
0.18431372940540314), (0.13445378839969635, 0.19215686619281769,
0.19215686619281769), (0.13865546882152557, 0.19607843458652496,
0.19607843458652496), (0.1428571492433548, 0.20392157137393951,
0.20392157137393951), (0.14705882966518402, 0.20784313976764679,
0.20784313976764679), (0.15126051008701324, 0.21568627655506134,
0.21568627655506134), (0.15546219050884247, 0.21960784494876862,
0.21960784494876862), (0.15966387093067169, 0.22352941334247589,
0.22352941334247589), (0.16386555135250092, 0.23137255012989044,
0.23137255012989044), (0.16806723177433014, 0.23529411852359772,
0.23529411852359772), (0.17226891219615936, 0.24313725531101227,
0.24313725531101227), (0.17647059261798859, 0.24705882370471954,
0.24705882370471954), (0.18067227303981781, 0.25490197539329529,
0.25490197539329529), (0.18487395346164703, 0.25882354378700256,
0.25882354378700256), (0.18907563388347626, 0.26666668057441711,
0.26666668057441711), (0.19327731430530548, 0.27058824896812439,
0.27058824896812439), (0.1974789947271347, 0.27450981736183167,
0.27450981736183167), (0.20168067514896393, 0.28235295414924622,
0.28235295414924622), (0.20588235557079315, 0.28627452254295349,
0.28627452254295349), (0.21008403599262238, 0.29803922772407532,
0.29803922772407532), (0.2142857164144516, 0.30588236451148987,
0.30588236451148987), (0.21848739683628082, 0.30980393290519714,
0.30980393290519714), (0.22268907725811005, 0.31764706969261169,
0.31764706969261169), (0.22689075767993927, 0.32156863808631897,
0.32156863808631897), (0.23109243810176849, 0.32941177487373352,
0.32941177487373352), (0.23529411852359772, 0.3333333432674408,
0.3333333432674408), (0.23949579894542694, 0.33725491166114807,
0.33725491166114807), (0.24369747936725616, 0.34509804844856262,
0.34509804844856262), (0.24789915978908539, 0.3490196168422699,
0.3490196168422699), (0.25210085511207581, 0.36078432202339172,
0.36078432202339172), (0.25630253553390503, 0.36862745881080627,
0.36862745881080627), (0.26050421595573425, 0.37254902720451355,
0.37254902720451355), (0.26470589637756348, 0.3803921639919281,
0.3803921639919281), (0.2689075767993927, 0.38431373238563538,
0.38431373238563538), (0.27310925722122192, 0.38823530077934265,
0.38823530077934265), (0.27731093764305115, 0.3960784375667572,
0.3960784375667572), (0.28151261806488037, 0.40000000596046448,
0.40000000596046448), (0.28571429848670959, 0.40784314274787903,
0.40784314274787903), (0.28991597890853882, 0.4117647111415863,
0.4117647111415863), (0.29411765933036804, 0.42352941632270813,
0.42352941632270813), (0.29831933975219727, 0.43137255311012268,
0.43137255311012268), (0.30252102017402649, 0.43529412150382996,
0.43529412150382996), (0.30672270059585571, 0.44313725829124451,
0.44313725829124451), (0.31092438101768494, 0.44705882668495178,
0.44705882668495178), (0.31512606143951416, 0.45098039507865906,
0.45098039507865906), (0.31932774186134338, 0.45882353186607361,
0.45882353186607361), (0.32352942228317261, 0.46274510025978088,
0.46274510025978088), (0.32773110270500183, 0.47058823704719543,
0.47058823704719543), (0.33193278312683105, 0.47450980544090271,
0.47450980544090271), (0.33613446354866028, 0.48235294222831726,
0.48235294222831726), (0.3403361439704895, 0.48627451062202454,
0.48627451062202454), (0.34453782439231873, 0.49411764740943909,
0.49411764740943909), (0.34873950481414795, 0.49803921580314636,
0.49803921580314636), (0.35294118523597717, 0.50196081399917603,
0.50196081399917603), (0.3571428656578064, 0.50980395078659058,
0.50980395078659058), (0.36134454607963562, 0.51372551918029785,
0.51372551918029785), (0.36554622650146484, 0.5215686559677124,
0.5215686559677124), (0.36974790692329407, 0.52549022436141968,
0.52549022436141968), (0.37394958734512329, 0.53333336114883423,
0.53333336114883423), (0.37815126776695251, 0.54509806632995605,
0.54509806632995605), (0.38235294818878174, 0.54901963472366333,
0.54901963472366333), (0.38655462861061096, 0.55294120311737061,
0.55294120311737061), (0.39075630903244019, 0.56078433990478516,
0.56078433990478516), (0.39495798945426941, 0.56470590829849243,
0.56470590829849243), (0.39915966987609863, 0.57254904508590698,
0.57254904508590698), (0.40336135029792786, 0.57647061347961426,
0.57647061347961426), (0.40756303071975708, 0.58431375026702881,
0.58431375026702881), (0.4117647111415863, 0.58823531866073608,
0.58823531866073608), (0.41596639156341553, 0.59607845544815063,
0.59607845544815063), (0.42016807198524475, 0.60000002384185791,
0.60000002384185791), (0.42436975240707397, 0.60784316062927246,
0.60784316062927246), (0.4285714328289032, 0.61176472902297974,
0.61176472902297974), (0.43277311325073242, 0.61568629741668701,
0.61568629741668701), (0.43697479367256165, 0.62352943420410156,
0.62352943420410156), (0.44117647409439087, 0.62745100259780884,
0.62745100259780884), (0.44537815451622009, 0.63529413938522339,
0.63529413938522339), (0.44957983493804932, 0.63921570777893066,
0.63921570777893066), (0.45378151535987854, 0.64705884456634521,
0.64705884456634521), (0.45798319578170776, 0.65098041296005249,
0.65098041296005249), (0.46218487620353699, 0.66274511814117432,
0.66274511814117432), (0.46638655662536621, 0.66666668653488159,
0.66666668653488159), (0.47058823704719543, 0.67450982332229614,
0.67450982332229614), (0.47478991746902466, 0.67843139171600342,
0.67843139171600342), (0.47899159789085388, 0.68627452850341797,
0.68627452850341797), (0.48319327831268311, 0.69019609689712524,
0.69019609689712524), (0.48739495873451233, 0.69803923368453979,
0.69803923368453979), (0.49159663915634155, 0.70196080207824707,
0.70196080207824707), (0.49579831957817078, 0.70980393886566162,
0.70980393886566162), (0.5, 0.7137255072593689, 0.7137255072593689),
(0.50420171022415161, 0.72549021244049072, 0.72549021244049072),
(0.50840336084365845, 0.729411780834198, 0.729411780834198),
(0.51260507106781006, 0.73725491762161255, 0.73725491762161255),
(0.51680672168731689, 0.74117648601531982, 0.74117648601531982),
(0.52100843191146851, 0.74901962280273438, 0.74901962280273438),
(0.52521008253097534, 0.75294119119644165, 0.75294119119644165),
(0.52941179275512695, 0.7607843279838562, 0.7607843279838562),
(0.53361344337463379, 0.76470589637756348, 0.76470589637756348),
(0.5378151535987854, 0.77254903316497803, 0.77254903316497803),
(0.54201680421829224, 0.7764706015586853, 0.7764706015586853),
(0.54621851444244385, 0.78823530673980713, 0.78823530673980713),
(0.55042016506195068, 0.7921568751335144, 0.7921568751335144),
(0.55462187528610229, 0.80000001192092896, 0.80000001192092896),
(0.55882352590560913, 0.80392158031463623, 0.80392158031463623),
(0.56302523612976074, 0.81176471710205078, 0.81176471710205078),
(0.56722688674926758, 0.81568628549575806, 0.81568628549575806),
(0.57142859697341919, 0.82352942228317261, 0.82352942228317261),
(0.57563024759292603, 0.82745099067687988, 0.82745099067687988),
(0.57983195781707764, 0.83137255907058716, 0.83137255907058716),
(0.58403360843658447, 0.83921569585800171, 0.83921569585800171),
(0.58823531866073608, 0.84313726425170898, 0.84313726425170898),
(0.59243696928024292, 0.85098040103912354, 0.85098040103912354),
(0.59663867950439453, 0.85490196943283081, 0.85490196943283081),
(0.60084033012390137, 0.86274510622024536, 0.86274510622024536),
(0.60504204034805298, 0.86666667461395264, 0.86666667461395264),
(0.60924369096755981, 0.87450981140136719, 0.87450981140136719),
(0.61344540119171143, 0.87843137979507446, 0.87843137979507446),
(0.61764705181121826, 0.88627451658248901, 0.88627451658248901),
(0.62184876203536987, 0.89019608497619629, 0.89019608497619629),
(0.62605041265487671, 0.89411765336990356, 0.89411765336990356),
(0.63025212287902832, 0.90588235855102539, 0.90588235855102539),
(0.63445377349853516, 0.91372549533843994, 0.91372549533843994),
(0.63865548372268677, 0.91764706373214722, 0.91764706373214722),
(0.6428571343421936, 0.92549020051956177, 0.92549020051956177),
(0.64705884456634521, 0.92941176891326904, 0.92941176891326904),
(0.65126049518585205, 0.93725490570068359, 0.93725490570068359),
(0.65546220541000366, 0.94117647409439087, 0.94117647409439087),
(0.6596638560295105, 0.94509804248809814, 0.94509804248809814),
(0.66386556625366211, 0.9529411792755127, 0.9529411792755127),
(0.66806721687316895, 0.95686274766921997, 0.95686274766921997),
(0.67226892709732056, 0.96470588445663452, 0.96470588445663452),
(0.67647057771682739, 0.9686274528503418, 0.9686274528503418),
(0.680672287940979, 0.97647058963775635, 0.97647058963775635),
(0.68487393856048584, 0.98039215803146362, 0.98039215803146362),
(0.68907564878463745, 0.98823529481887817, 0.98823529481887817),
(0.69327729940414429, 0.99215686321258545, 0.99215686321258545),
(0.6974790096282959, 1.0, 1.0), (0.70168066024780273, 1.0, 1.0),
(0.70588237047195435, 1.0, 1.0), (0.71008402109146118, 1.0, 1.0),
(0.71428573131561279, 1.0, 1.0), (0.71848738193511963, 1.0, 1.0),
(0.72268909215927124, 1.0, 1.0), (0.72689074277877808, 1.0, 1.0),
(0.73109245300292969, 1.0, 1.0), (0.73529410362243652, 1.0, 1.0),
(0.73949581384658813, 1.0, 1.0), (0.74369746446609497, 1.0, 1.0),
(0.74789917469024658, 1.0, 1.0), (0.75210082530975342, 1.0, 1.0),
(0.75630253553390503, 1.0, 1.0), (0.76050418615341187, 1.0, 1.0),
(0.76470589637756348, 1.0, 1.0), (0.76890754699707031, 1.0, 1.0),
(0.77310925722122192, 1.0, 1.0), (0.77731090784072876, 1.0, 1.0),
(0.78151261806488037, 1.0, 1.0), (0.78571426868438721, 1.0, 1.0),
(0.78991597890853882, 1.0, 1.0), (0.79411762952804565, 1.0, 1.0),
(0.79831933975219727, 1.0, 1.0), (0.8025209903717041, 1.0, 1.0),
(0.80672270059585571, 1.0, 1.0), (0.81092435121536255, 1.0, 1.0),
(0.81512606143951416, 1.0, 1.0), (0.819327712059021, 1.0, 1.0),
(0.82352942228317261, 1.0, 1.0), (0.82773107290267944, 1.0, 1.0),
(0.83193278312683105, 1.0, 1.0), (0.83613443374633789, 1.0, 1.0),
(0.8403361439704895, 1.0, 1.0), (0.84453779458999634, 1.0, 1.0),
(0.84873950481414795, 1.0, 1.0), (0.85294115543365479, 1.0, 1.0),
(0.8571428656578064, 1.0, 1.0), (0.86134451627731323, 1.0, 1.0),
(0.86554622650146484, 1.0, 1.0), (0.86974787712097168, 1.0, 1.0),
(0.87394958734512329, 1.0, 1.0), (0.87815123796463013, 1.0, 1.0),
(0.88235294818878174, 1.0, 1.0), (0.88655459880828857, 1.0, 1.0),
(0.89075630903244019, 1.0, 1.0), (0.89495795965194702, 1.0, 1.0),
(0.89915966987609863, 1.0, 1.0), (0.90336132049560547, 1.0, 1.0),
(0.90756303071975708, 1.0, 1.0), (0.91176468133926392, 1.0, 1.0),
(0.91596639156341553, 1.0, 1.0), (0.92016804218292236, 1.0, 1.0),
(0.92436975240707397, 1.0, 1.0), (0.92857140302658081, 1.0, 1.0),
(0.93277311325073242, 1.0, 1.0), (0.93697476387023926, 1.0, 1.0),
(0.94117647409439087, 1.0, 1.0), (0.94537812471389771, 1.0, 1.0),
(0.94957983493804932, 1.0, 1.0), (0.95378148555755615, 1.0, 1.0),
(0.95798319578170776, 1.0, 1.0), (0.9621848464012146, 1.0, 1.0),
(0.96638655662536621, 1.0, 1.0), (0.97058820724487305, 1.0, 1.0),
(0.97478991746902466, 1.0, 1.0), (0.97899156808853149, 1.0, 1.0),
(0.98319327831268311, 1.0, 1.0), (0.98739492893218994, 1.0, 1.0),
(0.99159663915634155, 1.0, 1.0), (0.99579828977584839, 1.0, 1.0), (1.0,
1.0, 1.0)]}
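# Illustrative sketch (not part of the original data file): each of these
# *_data dicts follows matplotlib's LinearSegmentedColormap "segmentdata"
# layout, i.e. {'red'|'green'|'blue': [(x, y0, y1), ...]} with x increasing
# from 0.0 to 1.0. Assuming matplotlib is importable, a colormap could be
# built from one of these dicts roughly like this (names here are only for
# illustration):
#
#     from matplotlib.colors import LinearSegmentedColormap
#     gist_heat_cmap = LinearSegmentedColormap('gist_heat', _gist_heat_data, N=256)
#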
_gist_ncar_data = {'blue': [(0.0, 0.50196081399917603,
0.50196081399917603), (0.0050505050458014011, 0.45098039507865906,
0.45098039507865906), (0.010101010091602802, 0.40392157435417175,
0.40392157435417175), (0.015151515603065491, 0.35686275362968445,
0.35686275362968445), (0.020202020183205605, 0.30980393290519714,
0.30980393290519714), (0.025252524763345718, 0.25882354378700256,
0.25882354378700256), (0.030303031206130981, 0.21176470816135406,
0.21176470816135406), (0.035353533923625946, 0.16470588743686676,
0.16470588743686676), (0.040404040366411209, 0.11764705926179886,
0.11764705926179886), (0.045454546809196472, 0.070588238537311554,
0.070588238537311554), (0.050505049526691437, 0.019607843831181526,
0.019607843831181526), (0.0555555559694767, 0.047058824449777603,
0.047058824449777603), (0.060606062412261963, 0.14509804546833038,
0.14509804546833038), (0.065656565129756927, 0.23921568691730499,
0.23921568691730499), (0.070707067847251892, 0.3333333432674408,
0.3333333432674408), (0.075757578015327454, 0.43137255311012268,
0.43137255311012268), (0.080808080732822418, 0.52549022436141968,
0.52549022436141968), (0.085858583450317383, 0.61960786581039429,
0.61960786581039429), (0.090909093618392944, 0.71764707565307617,
0.71764707565307617), (0.095959596335887909, 0.81176471710205078,
0.81176471710205078), (0.10101009905338287, 0.90588235855102539,
0.90588235855102539), (0.10606060922145844, 1.0, 1.0),
(0.1111111119389534, 1.0, 1.0), (0.11616161465644836, 1.0, 1.0),
(0.12121212482452393, 1.0, 1.0), (0.12626262009143829, 1.0, 1.0),
(0.13131313025951385, 1.0, 1.0), (0.13636364042758942, 1.0, 1.0),
(0.14141413569450378, 1.0, 1.0), (0.14646464586257935, 1.0, 1.0),
(0.15151515603065491, 1.0, 1.0), (0.15656565129756927, 1.0, 1.0),
(0.16161616146564484, 1.0, 1.0), (0.1666666716337204, 1.0, 1.0),
(0.17171716690063477, 1.0, 1.0), (0.17676767706871033, 1.0, 1.0),
(0.18181818723678589, 1.0, 1.0), (0.18686868250370026, 1.0, 1.0),
(0.19191919267177582, 1.0, 1.0), (0.19696970283985138, 1.0, 1.0),
(0.20202019810676575, 1.0, 1.0), (0.20707070827484131, 1.0, 1.0),
(0.21212121844291687, 0.99215686321258545, 0.99215686321258545),
(0.21717171370983124, 0.95686274766921997, 0.95686274766921997),
(0.2222222238779068, 0.91764706373214722, 0.91764706373214722),
(0.22727273404598236, 0.88235294818878174, 0.88235294818878174),
(0.23232322931289673, 0.84313726425170898, 0.84313726425170898),
(0.23737373948097229, 0.80392158031463623, 0.80392158031463623),
(0.24242424964904785, 0.76862746477127075, 0.76862746477127075),
(0.24747474491596222, 0.729411780834198, 0.729411780834198),
(0.25252524018287659, 0.69019609689712524, 0.69019609689712524),
(0.25757575035095215, 0.65490198135375977, 0.65490198135375977),
(0.26262626051902771, 0.61568629741668701, 0.61568629741668701),
(0.26767677068710327, 0.56470590829849243, 0.56470590829849243),
(0.27272728085517883, 0.50980395078659058, 0.50980395078659058),
(0.27777779102325439, 0.45098039507865906, 0.45098039507865906),
(0.28282827138900757, 0.39215686917304993, 0.39215686917304993),
(0.28787878155708313, 0.3333333432674408, 0.3333333432674408),
(0.29292929172515869, 0.27843138575553894, 0.27843138575553894),
(0.29797980189323425, 0.21960784494876862, 0.21960784494876862),
(0.30303031206130981, 0.16078431904315948, 0.16078431904315948),
(0.30808082222938538, 0.10588235408067703, 0.10588235408067703),
(0.31313130259513855, 0.047058824449777603, 0.047058824449777603),
(0.31818181276321411, 0.0, 0.0), (0.32323232293128967, 0.0, 0.0),
(0.32828283309936523, 0.0, 0.0), (0.3333333432674408, 0.0, 0.0),
(0.33838382363319397, 0.0, 0.0), (0.34343433380126953, 0.0, 0.0),
(0.34848484396934509, 0.0, 0.0), (0.35353535413742065, 0.0, 0.0),
(0.35858586430549622, 0.0, 0.0), (0.36363637447357178, 0.0, 0.0),
(0.36868685483932495, 0.0, 0.0), (0.37373736500740051, 0.0, 0.0),
(0.37878787517547607, 0.0, 0.0), (0.38383838534355164, 0.0, 0.0),
(0.3888888955116272, 0.0, 0.0), (0.39393940567970276, 0.0, 0.0),
(0.39898988604545593, 0.0, 0.0), (0.40404039621353149, 0.0, 0.0),
(0.40909090638160706, 0.0, 0.0), (0.41414141654968262, 0.0, 0.0),
(0.41919192671775818, 0.0, 0.0), (0.42424243688583374,
0.0039215688593685627, 0.0039215688593685627), (0.42929291725158691,
0.027450980618596077, 0.027450980618596077), (0.43434342741966248,
0.050980392843484879, 0.050980392843484879), (0.43939393758773804,
0.074509806931018829, 0.074509806931018829), (0.4444444477558136,
0.094117648899555206, 0.094117648899555206), (0.44949495792388916,
0.11764705926179886, 0.11764705926179886), (0.45454546809196472,
0.14117647707462311, 0.14117647707462311), (0.4595959484577179,
0.16470588743686676, 0.16470588743686676), (0.46464645862579346,
0.18823529779911041, 0.18823529779911041), (0.46969696879386902,
0.21176470816135406, 0.21176470816135406), (0.47474747896194458,
0.23529411852359772, 0.23529411852359772), (0.47979798913002014,
0.22352941334247589, 0.22352941334247589), (0.4848484992980957,
0.20000000298023224, 0.20000000298023224), (0.48989897966384888,
0.17647059261798859, 0.17647059261798859), (0.49494948983192444,
0.15294118225574493, 0.15294118225574493), (0.5, 0.12941177189350128,
0.12941177189350128), (0.50505048036575317, 0.10980392247438431,
0.10980392247438431), (0.51010102033615112, 0.086274512112140656,
0.086274512112140656), (0.5151515007019043, 0.062745101749897003,
0.062745101749897003), (0.52020204067230225, 0.039215687662363052,
0.039215687662363052), (0.52525252103805542, 0.015686275437474251,
0.015686275437474251), (0.53030300140380859, 0.0, 0.0),
(0.53535354137420654, 0.0, 0.0), (0.54040402173995972, 0.0, 0.0),
(0.54545456171035767, 0.0, 0.0), (0.55050504207611084, 0.0, 0.0),
(0.55555558204650879, 0.0, 0.0), (0.56060606241226196, 0.0, 0.0),
(0.56565654277801514, 0.0, 0.0), (0.57070708274841309, 0.0, 0.0),
(0.57575756311416626, 0.0, 0.0), (0.58080810308456421, 0.0, 0.0),
(0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627),
(0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254),
(0.59595960378646851, 0.011764706112444401, 0.011764706112444401),
(0.60101008415222168, 0.019607843831181526, 0.019607843831181526),
(0.60606062412261963, 0.023529412224888802, 0.023529412224888802),
(0.6111111044883728, 0.031372550874948502, 0.031372550874948502),
(0.61616164445877075, 0.035294119268655777, 0.035294119268655777),
(0.62121212482452393, 0.043137256056070328, 0.043137256056070328),
(0.6262626051902771, 0.047058824449777603, 0.047058824449777603),
(0.63131314516067505, 0.054901961237192154, 0.054901961237192154),
(0.63636362552642822, 0.054901961237192154, 0.054901961237192154),
(0.64141416549682617, 0.050980392843484879, 0.050980392843484879),
(0.64646464586257935, 0.043137256056070328, 0.043137256056070328),
(0.65151512622833252, 0.039215687662363052, 0.039215687662363052),
(0.65656566619873047, 0.031372550874948502, 0.031372550874948502),
(0.66161614656448364, 0.027450980618596077, 0.027450980618596077),
(0.66666668653488159, 0.019607843831181526, 0.019607843831181526),
(0.67171716690063477, 0.015686275437474251, 0.015686275437474251),
(0.67676764726638794, 0.011764706112444401, 0.011764706112444401),
(0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627),
(0.68686866760253906, 0.0, 0.0), (0.69191920757293701, 0.0, 0.0),
(0.69696968793869019, 0.0, 0.0), (0.70202022790908813, 0.0, 0.0),
(0.70707070827484131, 0.0, 0.0), (0.71212118864059448, 0.0, 0.0),
(0.71717172861099243, 0.0, 0.0), (0.72222220897674561, 0.0, 0.0),
(0.72727274894714355, 0.0, 0.0), (0.73232322931289673, 0.0, 0.0),
(0.7373737096786499, 0.0, 0.0), (0.74242424964904785,
0.031372550874948502, 0.031372550874948502), (0.74747473001480103,
0.12941177189350128, 0.12941177189350128), (0.75252526998519897,
0.22352941334247589, 0.22352941334247589), (0.75757575035095215,
0.32156863808631897, 0.32156863808631897), (0.7626262903213501,
0.41568627953529358, 0.41568627953529358), (0.76767677068710327,
0.50980395078659058, 0.50980395078659058), (0.77272725105285645,
0.60784316062927246, 0.60784316062927246), (0.77777779102325439,
0.70196080207824707, 0.70196080207824707), (0.78282827138900757,
0.79607844352722168, 0.79607844352722168), (0.78787881135940552,
0.89411765336990356, 0.89411765336990356), (0.79292929172515869,
0.98823529481887817, 0.98823529481887817), (0.79797977209091187, 1.0,
1.0), (0.80303031206130981, 1.0, 1.0), (0.80808079242706299, 1.0, 1.0),
(0.81313133239746094, 1.0, 1.0), (0.81818181276321411, 1.0, 1.0),
(0.82323235273361206, 1.0, 1.0), (0.82828283309936523, 1.0, 1.0),
(0.83333331346511841, 1.0, 1.0), (0.83838385343551636, 1.0, 1.0),
(0.84343433380126953, 1.0, 1.0), (0.84848487377166748,
0.99607843160629272, 0.99607843160629272), (0.85353535413742065,
0.98823529481887817, 0.98823529481887817), (0.85858583450317383,
0.9843137264251709, 0.9843137264251709), (0.86363637447357178,
0.97647058963775635, 0.97647058963775635), (0.86868685483932495,
0.9686274528503418, 0.9686274528503418), (0.8737373948097229,
0.96470588445663452, 0.96470588445663452), (0.87878787517547607,
0.95686274766921997, 0.95686274766921997), (0.88383835554122925,
0.94901961088180542, 0.94901961088180542), (0.8888888955116272,
0.94509804248809814, 0.94509804248809814), (0.89393937587738037,
0.93725490570068359, 0.93725490570068359), (0.89898991584777832,
0.93333333730697632, 0.93333333730697632), (0.90404039621353149,
0.93333333730697632, 0.93333333730697632), (0.90909093618392944,
0.93725490570068359, 0.93725490570068359), (0.91414141654968262,
0.93725490570068359, 0.93725490570068359), (0.91919189691543579,
0.94117647409439087, 0.94117647409439087), (0.92424243688583374,
0.94509804248809814, 0.94509804248809814), (0.92929291725158691,
0.94509804248809814, 0.94509804248809814), (0.93434345722198486,
0.94901961088180542, 0.94901961088180542), (0.93939393758773804,
0.9529411792755127, 0.9529411792755127), (0.94444441795349121,
0.9529411792755127, 0.9529411792755127), (0.94949495792388916,
0.95686274766921997, 0.95686274766921997), (0.95454543828964233,
0.96078431606292725, 0.96078431606292725), (0.95959597826004028,
0.96470588445663452, 0.96470588445663452), (0.96464645862579346,
0.9686274528503418, 0.9686274528503418), (0.96969699859619141,
0.97254902124404907, 0.97254902124404907), (0.97474747896194458,
0.97647058963775635, 0.97647058963775635), (0.97979795932769775,
0.98039215803146362, 0.98039215803146362), (0.9848484992980957,
0.9843137264251709, 0.9843137264251709), (0.98989897966384888,
0.98823529481887817, 0.98823529481887817), (0.99494951963424683,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)], 'green': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.035294119268655777, 0.035294119268655777), (0.010101010091602802,
0.074509806931018829, 0.074509806931018829), (0.015151515603065491,
0.10980392247438431, 0.10980392247438431), (0.020202020183205605,
0.14901961386203766, 0.14901961386203766), (0.025252524763345718,
0.18431372940540314, 0.18431372940540314), (0.030303031206130981,
0.22352941334247589, 0.22352941334247589), (0.035353533923625946,
0.25882354378700256, 0.25882354378700256), (0.040404040366411209,
0.29803922772407532, 0.29803922772407532), (0.045454546809196472,
0.3333333432674408, 0.3333333432674408), (0.050505049526691437,
0.37254902720451355, 0.37254902720451355), (0.0555555559694767,
0.36862745881080627, 0.36862745881080627), (0.060606062412261963,
0.3333333432674408, 0.3333333432674408), (0.065656565129756927,
0.29411765933036804, 0.29411765933036804), (0.070707067847251892,
0.25882354378700256, 0.25882354378700256), (0.075757578015327454,
0.21960784494876862, 0.21960784494876862), (0.080808080732822418,
0.18431372940540314, 0.18431372940540314), (0.085858583450317383,
0.14509804546833038, 0.14509804546833038), (0.090909093618392944,
0.10980392247438431, 0.10980392247438431), (0.095959596335887909,
0.070588238537311554, 0.070588238537311554), (0.10101009905338287,
0.035294119268655777, 0.035294119268655777), (0.10606060922145844, 0.0,
0.0), (0.1111111119389534, 0.074509806931018829, 0.074509806931018829),
(0.11616161465644836, 0.14509804546833038, 0.14509804546833038),
(0.12121212482452393, 0.21568627655506134, 0.21568627655506134),
(0.12626262009143829, 0.28627452254295349, 0.28627452254295349),
(0.13131313025951385, 0.36078432202339172, 0.36078432202339172),
(0.13636364042758942, 0.43137255311012268, 0.43137255311012268),
(0.14141413569450378, 0.50196081399917603, 0.50196081399917603),
(0.14646464586257935, 0.57254904508590698, 0.57254904508590698),
(0.15151515603065491, 0.64705884456634521, 0.64705884456634521),
(0.15656565129756927, 0.71764707565307617, 0.71764707565307617),
(0.16161616146564484, 0.7607843279838562, 0.7607843279838562),
(0.1666666716337204, 0.78431373834609985, 0.78431373834609985),
(0.17171716690063477, 0.80784314870834351, 0.80784314870834351),
(0.17676767706871033, 0.83137255907058716, 0.83137255907058716),
(0.18181818723678589, 0.85490196943283081, 0.85490196943283081),
(0.18686868250370026, 0.88235294818878174, 0.88235294818878174),
(0.19191919267177582, 0.90588235855102539, 0.90588235855102539),
(0.19696970283985138, 0.92941176891326904, 0.92941176891326904),
(0.20202019810676575, 0.9529411792755127, 0.9529411792755127),
(0.20707070827484131, 0.97647058963775635, 0.97647058963775635),
(0.21212121844291687, 0.99607843160629272, 0.99607843160629272),
(0.21717171370983124, 0.99607843160629272, 0.99607843160629272),
(0.2222222238779068, 0.99215686321258545, 0.99215686321258545),
(0.22727273404598236, 0.99215686321258545, 0.99215686321258545),
(0.23232322931289673, 0.99215686321258545, 0.99215686321258545),
(0.23737373948097229, 0.98823529481887817, 0.98823529481887817),
(0.24242424964904785, 0.98823529481887817, 0.98823529481887817),
(0.24747474491596222, 0.9843137264251709, 0.9843137264251709),
(0.25252524018287659, 0.9843137264251709, 0.9843137264251709),
(0.25757575035095215, 0.98039215803146362, 0.98039215803146362),
(0.26262626051902771, 0.98039215803146362, 0.98039215803146362),
(0.26767677068710327, 0.98039215803146362, 0.98039215803146362),
(0.27272728085517883, 0.98039215803146362, 0.98039215803146362),
(0.27777779102325439, 0.9843137264251709, 0.9843137264251709),
(0.28282827138900757, 0.9843137264251709, 0.9843137264251709),
(0.28787878155708313, 0.98823529481887817, 0.98823529481887817),
(0.29292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.29797980189323425, 0.99215686321258545, 0.99215686321258545),
(0.30303031206130981, 0.99215686321258545, 0.99215686321258545),
(0.30808082222938538, 0.99607843160629272, 0.99607843160629272),
(0.31313130259513855, 0.99607843160629272, 0.99607843160629272),
(0.31818181276321411, 0.99607843160629272, 0.99607843160629272),
(0.32323232293128967, 0.97647058963775635, 0.97647058963775635),
(0.32828283309936523, 0.95686274766921997, 0.95686274766921997),
(0.3333333432674408, 0.93725490570068359, 0.93725490570068359),
(0.33838382363319397, 0.92156863212585449, 0.92156863212585449),
(0.34343433380126953, 0.90196079015731812, 0.90196079015731812),
(0.34848484396934509, 0.88235294818878174, 0.88235294818878174),
(0.35353535413742065, 0.86274510622024536, 0.86274510622024536),
(0.35858586430549622, 0.84705883264541626, 0.84705883264541626),
(0.36363637447357178, 0.82745099067687988, 0.82745099067687988),
(0.36868685483932495, 0.80784314870834351, 0.80784314870834351),
(0.37373736500740051, 0.81568628549575806, 0.81568628549575806),
(0.37878787517547607, 0.83529412746429443, 0.83529412746429443),
(0.38383838534355164, 0.85098040103912354, 0.85098040103912354),
(0.3888888955116272, 0.87058824300765991, 0.87058824300765991),
(0.39393940567970276, 0.89019608497619629, 0.89019608497619629),
(0.39898988604545593, 0.90980392694473267, 0.90980392694473267),
(0.40404039621353149, 0.92549020051956177, 0.92549020051956177),
(0.40909090638160706, 0.94509804248809814, 0.94509804248809814),
(0.41414141654968262, 0.96470588445663452, 0.96470588445663452),
(0.41919192671775818, 0.9843137264251709, 0.9843137264251709),
(0.42424243688583374, 1.0, 1.0), (0.42929291725158691, 1.0, 1.0),
(0.43434342741966248, 1.0, 1.0), (0.43939393758773804, 1.0, 1.0),
(0.4444444477558136, 1.0, 1.0), (0.44949495792388916, 1.0, 1.0),
(0.45454546809196472, 1.0, 1.0), (0.4595959484577179, 1.0, 1.0),
(0.46464645862579346, 1.0, 1.0), (0.46969696879386902, 1.0, 1.0),
(0.47474747896194458, 1.0, 1.0), (0.47979798913002014, 1.0, 1.0),
(0.4848484992980957, 1.0, 1.0), (0.48989897966384888, 1.0, 1.0),
(0.49494948983192444, 1.0, 1.0), (0.5, 1.0, 1.0), (0.50505048036575317,
1.0, 1.0), (0.51010102033615112, 1.0, 1.0), (0.5151515007019043, 1.0,
1.0), (0.52020204067230225, 1.0, 1.0), (0.52525252103805542, 1.0, 1.0),
(0.53030300140380859, 0.99215686321258545, 0.99215686321258545),
(0.53535354137420654, 0.98039215803146362, 0.98039215803146362),
(0.54040402173995972, 0.96470588445663452, 0.96470588445663452),
(0.54545456171035767, 0.94901961088180542, 0.94901961088180542),
(0.55050504207611084, 0.93333333730697632, 0.93333333730697632),
(0.55555558204650879, 0.91764706373214722, 0.91764706373214722),
(0.56060606241226196, 0.90588235855102539, 0.90588235855102539),
(0.56565654277801514, 0.89019608497619629, 0.89019608497619629),
(0.57070708274841309, 0.87450981140136719, 0.87450981140136719),
(0.57575756311416626, 0.85882353782653809, 0.85882353782653809),
(0.58080810308456421, 0.84313726425170898, 0.84313726425170898),
(0.58585858345031738, 0.83137255907058716, 0.83137255907058716),
(0.59090906381607056, 0.81960785388946533, 0.81960785388946533),
(0.59595960378646851, 0.81176471710205078, 0.81176471710205078),
(0.60101008415222168, 0.80000001192092896, 0.80000001192092896),
(0.60606062412261963, 0.78823530673980713, 0.78823530673980713),
(0.6111111044883728, 0.7764706015586853, 0.7764706015586853),
(0.61616164445877075, 0.76470589637756348, 0.76470589637756348),
(0.62121212482452393, 0.75294119119644165, 0.75294119119644165),
(0.6262626051902771, 0.74117648601531982, 0.74117648601531982),
(0.63131314516067505, 0.729411780834198, 0.729411780834198),
(0.63636362552642822, 0.70980393886566162, 0.70980393886566162),
(0.64141416549682617, 0.66666668653488159, 0.66666668653488159),
(0.64646464586257935, 0.62352943420410156, 0.62352943420410156),
(0.65151512622833252, 0.58039218187332153, 0.58039218187332153),
(0.65656566619873047, 0.5372549295425415, 0.5372549295425415),
(0.66161614656448364, 0.49411764740943909, 0.49411764740943909),
(0.66666668653488159, 0.45098039507865906, 0.45098039507865906),
(0.67171716690063477, 0.40392157435417175, 0.40392157435417175),
(0.67676764726638794, 0.36078432202339172, 0.36078432202339172),
(0.68181818723678589, 0.31764706969261169, 0.31764706969261169),
(0.68686866760253906, 0.27450981736183167, 0.27450981736183167),
(0.69191920757293701, 0.24705882370471954, 0.24705882370471954),
(0.69696968793869019, 0.21960784494876862, 0.21960784494876862),
(0.70202022790908813, 0.19607843458652496, 0.19607843458652496),
(0.70707070827484131, 0.16862745583057404, 0.16862745583057404),
(0.71212118864059448, 0.14509804546833038, 0.14509804546833038),
(0.71717172861099243, 0.11764705926179886, 0.11764705926179886),
(0.72222220897674561, 0.090196080505847931, 0.090196080505847931),
(0.72727274894714355, 0.066666670143604279, 0.066666670143604279),
(0.73232322931289673, 0.039215687662363052, 0.039215687662363052),
(0.7373737096786499, 0.015686275437474251, 0.015686275437474251),
(0.74242424964904785, 0.0, 0.0), (0.74747473001480103, 0.0, 0.0),
(0.75252526998519897, 0.0, 0.0), (0.75757575035095215, 0.0, 0.0),
(0.7626262903213501, 0.0, 0.0), (0.76767677068710327, 0.0, 0.0),
(0.77272725105285645, 0.0, 0.0), (0.77777779102325439, 0.0, 0.0),
(0.78282827138900757, 0.0, 0.0), (0.78787881135940552, 0.0, 0.0),
(0.79292929172515869, 0.0, 0.0), (0.79797977209091187,
0.015686275437474251, 0.015686275437474251), (0.80303031206130981,
0.031372550874948502, 0.031372550874948502), (0.80808079242706299,
0.050980392843484879, 0.050980392843484879), (0.81313133239746094,
0.066666670143604279, 0.066666670143604279), (0.81818181276321411,
0.086274512112140656, 0.086274512112140656), (0.82323235273361206,
0.10588235408067703, 0.10588235408067703), (0.82828283309936523,
0.12156862765550613, 0.12156862765550613), (0.83333331346511841,
0.14117647707462311, 0.14117647707462311), (0.83838385343551636,
0.15686275064945221, 0.15686275064945221), (0.84343433380126953,
0.17647059261798859, 0.17647059261798859), (0.84848487377166748,
0.20000000298023224, 0.20000000298023224), (0.85353535413742065,
0.23137255012989044, 0.23137255012989044), (0.85858583450317383,
0.25882354378700256, 0.25882354378700256), (0.86363637447357178,
0.29019609093666077, 0.29019609093666077), (0.86868685483932495,
0.32156863808631897, 0.32156863808631897), (0.8737373948097229,
0.35294118523597717, 0.35294118523597717), (0.87878787517547607,
0.38431373238563538, 0.38431373238563538), (0.88383835554122925,
0.41568627953529358, 0.41568627953529358), (0.8888888955116272,
0.44313725829124451, 0.44313725829124451), (0.89393937587738037,
0.47450980544090271, 0.47450980544090271), (0.89898991584777832,
0.5058823823928833, 0.5058823823928833), (0.90404039621353149,
0.52941179275512695, 0.52941179275512695), (0.90909093618392944,
0.55294120311737061, 0.55294120311737061), (0.91414141654968262,
0.57254904508590698, 0.57254904508590698), (0.91919189691543579,
0.59607845544815063, 0.59607845544815063), (0.92424243688583374,
0.61960786581039429, 0.61960786581039429), (0.92929291725158691,
0.64313727617263794, 0.64313727617263794), (0.93434345722198486,
0.66274511814117432, 0.66274511814117432), (0.93939393758773804,
0.68627452850341797, 0.68627452850341797), (0.94444441795349121,
0.70980393886566162, 0.70980393886566162), (0.94949495792388916,
0.729411780834198, 0.729411780834198), (0.95454543828964233,
0.75294119119644165, 0.75294119119644165), (0.95959597826004028,
0.78039216995239258, 0.78039216995239258), (0.96464645862579346,
0.80392158031463623, 0.80392158031463623), (0.96969699859619141,
0.82745099067687988, 0.82745099067687988), (0.97474747896194458,
0.85098040103912354, 0.85098040103912354), (0.97979795932769775,
0.87450981140136719, 0.87450981140136719), (0.9848484992980957,
0.90196079015731812, 0.90196079015731812), (0.98989897966384888,
0.92549020051956177, 0.92549020051956177), (0.99494951963424683,
0.94901961088180542, 0.94901961088180542), (1.0, 0.97254902124404907,
0.97254902124404907)], 'red': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.0, 0.0), (0.010101010091602802, 0.0, 0.0), (0.015151515603065491, 0.0,
0.0), (0.020202020183205605, 0.0, 0.0), (0.025252524763345718, 0.0, 0.0),
(0.030303031206130981, 0.0, 0.0), (0.035353533923625946, 0.0, 0.0),
(0.040404040366411209, 0.0, 0.0), (0.045454546809196472, 0.0, 0.0),
(0.050505049526691437, 0.0, 0.0), (0.0555555559694767, 0.0, 0.0),
(0.060606062412261963, 0.0, 0.0), (0.065656565129756927, 0.0, 0.0),
(0.070707067847251892, 0.0, 0.0), (0.075757578015327454, 0.0, 0.0),
(0.080808080732822418, 0.0, 0.0), (0.085858583450317383, 0.0, 0.0),
(0.090909093618392944, 0.0, 0.0), (0.095959596335887909, 0.0, 0.0),
(0.10101009905338287, 0.0, 0.0), (0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.0, 0.0), (0.11616161465644836, 0.0, 0.0),
(0.12121212482452393, 0.0, 0.0), (0.12626262009143829, 0.0, 0.0),
(0.13131313025951385, 0.0, 0.0), (0.13636364042758942, 0.0, 0.0),
(0.14141413569450378, 0.0, 0.0), (0.14646464586257935, 0.0, 0.0),
(0.15151515603065491, 0.0, 0.0), (0.15656565129756927, 0.0, 0.0),
(0.16161616146564484, 0.0, 0.0), (0.1666666716337204, 0.0, 0.0),
(0.17171716690063477, 0.0, 0.0), (0.17676767706871033, 0.0, 0.0),
(0.18181818723678589, 0.0, 0.0), (0.18686868250370026, 0.0, 0.0),
(0.19191919267177582, 0.0, 0.0), (0.19696970283985138, 0.0, 0.0),
(0.20202019810676575, 0.0, 0.0), (0.20707070827484131, 0.0, 0.0),
(0.21212121844291687, 0.0, 0.0), (0.21717171370983124, 0.0, 0.0),
(0.2222222238779068, 0.0, 0.0), (0.22727273404598236, 0.0, 0.0),
(0.23232322931289673, 0.0, 0.0), (0.23737373948097229, 0.0, 0.0),
(0.24242424964904785, 0.0, 0.0), (0.24747474491596222, 0.0, 0.0),
(0.25252524018287659, 0.0, 0.0), (0.25757575035095215, 0.0, 0.0),
(0.26262626051902771, 0.0, 0.0), (0.26767677068710327, 0.0, 0.0),
(0.27272728085517883, 0.0, 0.0), (0.27777779102325439, 0.0, 0.0),
(0.28282827138900757, 0.0, 0.0), (0.28787878155708313, 0.0, 0.0),
(0.29292929172515869, 0.0, 0.0), (0.29797980189323425, 0.0, 0.0),
(0.30303031206130981, 0.0, 0.0), (0.30808082222938538, 0.0, 0.0),
(0.31313130259513855, 0.0, 0.0), (0.31818181276321411,
0.0039215688593685627, 0.0039215688593685627), (0.32323232293128967,
0.043137256056070328, 0.043137256056070328), (0.32828283309936523,
0.08235294371843338, 0.08235294371843338), (0.3333333432674408,
0.11764705926179886, 0.11764705926179886), (0.33838382363319397,
0.15686275064945221, 0.15686275064945221), (0.34343433380126953,
0.19607843458652496, 0.19607843458652496), (0.34848484396934509,
0.23137255012989044, 0.23137255012989044), (0.35353535413742065,
0.27058824896812439, 0.27058824896812439), (0.35858586430549622,
0.30980393290519714, 0.30980393290519714), (0.36363637447357178,
0.3490196168422699, 0.3490196168422699), (0.36868685483932495,
0.38431373238563538, 0.38431373238563538), (0.37373736500740051,
0.40392157435417175, 0.40392157435417175), (0.37878787517547607,
0.41568627953529358, 0.41568627953529358), (0.38383838534355164,
0.42352941632270813, 0.42352941632270813), (0.3888888955116272,
0.43137255311012268, 0.43137255311012268), (0.39393940567970276,
0.44313725829124451, 0.44313725829124451), (0.39898988604545593,
0.45098039507865906, 0.45098039507865906), (0.40404039621353149,
0.45882353186607361, 0.45882353186607361), (0.40909090638160706,
0.47058823704719543, 0.47058823704719543), (0.41414141654968262,
0.47843137383460999, 0.47843137383460999), (0.41919192671775818,
0.49019607901573181, 0.49019607901573181), (0.42424243688583374,
0.50196081399917603, 0.50196081399917603), (0.42929291725158691,
0.52549022436141968, 0.52549022436141968), (0.43434342741966248,
0.54901963472366333, 0.54901963472366333), (0.43939393758773804,
0.57254904508590698, 0.57254904508590698), (0.4444444477558136,
0.60000002384185791, 0.60000002384185791), (0.44949495792388916,
0.62352943420410156, 0.62352943420410156), (0.45454546809196472,
0.64705884456634521, 0.64705884456634521), (0.4595959484577179,
0.67058825492858887, 0.67058825492858887), (0.46464645862579346,
0.69411766529083252, 0.69411766529083252), (0.46969696879386902,
0.72156864404678345, 0.72156864404678345), (0.47474747896194458,
0.7450980544090271, 0.7450980544090271), (0.47979798913002014,
0.76862746477127075, 0.76862746477127075), (0.4848484992980957,
0.7921568751335144, 0.7921568751335144), (0.48989897966384888,
0.81568628549575806, 0.81568628549575806), (0.49494948983192444,
0.83921569585800171, 0.83921569585800171), (0.5, 0.86274510622024536,
0.86274510622024536), (0.50505048036575317, 0.88627451658248901,
0.88627451658248901), (0.51010102033615112, 0.90980392694473267,
0.90980392694473267), (0.5151515007019043, 0.93333333730697632,
0.93333333730697632), (0.52020204067230225, 0.95686274766921997,
0.95686274766921997), (0.52525252103805542, 0.98039215803146362,
0.98039215803146362), (0.53030300140380859, 1.0, 1.0),
(0.53535354137420654, 1.0, 1.0), (0.54040402173995972, 1.0, 1.0),
(0.54545456171035767, 1.0, 1.0), (0.55050504207611084, 1.0, 1.0),
(0.55555558204650879, 1.0, 1.0), (0.56060606241226196, 1.0, 1.0),
(0.56565654277801514, 1.0, 1.0), (0.57070708274841309, 1.0, 1.0),
(0.57575756311416626, 1.0, 1.0), (0.58080810308456421, 1.0, 1.0),
(0.58585858345031738, 1.0, 1.0), (0.59090906381607056, 1.0, 1.0),
(0.59595960378646851, 1.0, 1.0), (0.60101008415222168, 1.0, 1.0),
(0.60606062412261963, 1.0, 1.0), (0.6111111044883728, 1.0, 1.0),
(0.61616164445877075, 1.0, 1.0), (0.62121212482452393, 1.0, 1.0),
(0.6262626051902771, 1.0, 1.0), (0.63131314516067505, 1.0, 1.0),
(0.63636362552642822, 1.0, 1.0), (0.64141416549682617, 1.0, 1.0),
(0.64646464586257935, 1.0, 1.0), (0.65151512622833252, 1.0, 1.0),
(0.65656566619873047, 1.0, 1.0), (0.66161614656448364, 1.0, 1.0),
(0.66666668653488159, 1.0, 1.0), (0.67171716690063477, 1.0, 1.0),
(0.67676764726638794, 1.0, 1.0), (0.68181818723678589, 1.0, 1.0),
(0.68686866760253906, 1.0, 1.0), (0.69191920757293701, 1.0, 1.0),
(0.69696968793869019, 1.0, 1.0), (0.70202022790908813, 1.0, 1.0),
(0.70707070827484131, 1.0, 1.0), (0.71212118864059448, 1.0, 1.0),
(0.71717172861099243, 1.0, 1.0), (0.72222220897674561, 1.0, 1.0),
(0.72727274894714355, 1.0, 1.0), (0.73232322931289673, 1.0, 1.0),
(0.7373737096786499, 1.0, 1.0), (0.74242424964904785, 1.0, 1.0),
(0.74747473001480103, 1.0, 1.0), (0.75252526998519897, 1.0, 1.0),
(0.75757575035095215, 1.0, 1.0), (0.7626262903213501, 1.0, 1.0),
(0.76767677068710327, 1.0, 1.0), (0.77272725105285645, 1.0, 1.0),
(0.77777779102325439, 1.0, 1.0), (0.78282827138900757, 1.0, 1.0),
(0.78787881135940552, 1.0, 1.0), (0.79292929172515869, 1.0, 1.0),
(0.79797977209091187, 0.96470588445663452, 0.96470588445663452),
(0.80303031206130981, 0.92549020051956177, 0.92549020051956177),
(0.80808079242706299, 0.89019608497619629, 0.89019608497619629),
(0.81313133239746094, 0.85098040103912354, 0.85098040103912354),
(0.81818181276321411, 0.81568628549575806, 0.81568628549575806),
(0.82323235273361206, 0.7764706015586853, 0.7764706015586853),
(0.82828283309936523, 0.74117648601531982, 0.74117648601531982),
(0.83333331346511841, 0.70196080207824707, 0.70196080207824707),
(0.83838385343551636, 0.66666668653488159, 0.66666668653488159),
(0.84343433380126953, 0.62745100259780884, 0.62745100259780884),
(0.84848487377166748, 0.61960786581039429, 0.61960786581039429),
(0.85353535413742065, 0.65098041296005249, 0.65098041296005249),
(0.85858583450317383, 0.68235296010971069, 0.68235296010971069),
(0.86363637447357178, 0.7137255072593689, 0.7137255072593689),
(0.86868685483932495, 0.7450980544090271, 0.7450980544090271),
(0.8737373948097229, 0.77254903316497803, 0.77254903316497803),
(0.87878787517547607, 0.80392158031463623, 0.80392158031463623),
(0.88383835554122925, 0.83529412746429443, 0.83529412746429443),
(0.8888888955116272, 0.86666667461395264, 0.86666667461395264),
(0.89393937587738037, 0.89803922176361084, 0.89803922176361084),
(0.89898991584777832, 0.92941176891326904, 0.92941176891326904),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)]}
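# --- Illustrative note added by the editor (hedged sketch; the names
# `_demo_segmentdata` and `demo_cmap` below are hypothetical and not part of
# the original tables). Each channel entry in these dictionaries is an
# (x, y0, y1) anchor: x is the normalized position in [0, 1], y1 is the channel
# value used just above x, y0 the value used just below x (y0 != y1 gives a
# hard step), and values are interpolated linearly between consecutive anchors.
# A minimal, self-contained example of the same segmentdata layout, assuming
# matplotlib is installed:
from matplotlib.colors import LinearSegmentedColormap

_demo_segmentdata = {
    'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
    'green': [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
    'blue':  [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)],
}
demo_cmap = LinearSegmentedColormap('demo', _demo_segmentdata, N=256)
# demo_cmap(0.25) is approximately (0.5, 0.25, 0.0, 1.0): red is halfway up its
# first ramp, green is a quarter of the way through its single ramp, blue is 0.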
_gist_rainbow_data = {'blue':
[(0.0, 0.16470588743686676, 0.16470588743686676), (0.0042016808874905109,
0.14117647707462311, 0.14117647707462311), (0.0084033617749810219,
0.12156862765550613, 0.12156862765550613), (0.012605042196810246,
0.10196078568696976, 0.10196078568696976), (0.016806723549962044,
0.078431375324726105, 0.078431375324726105), (0.021008403971791267,
0.058823529630899429, 0.058823529630899429), (0.025210084393620491,
0.039215687662363052, 0.039215687662363052), (0.029411764815449715,
0.015686275437474251, 0.015686275437474251), (0.033613447099924088, 0.0,
0.0), (0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0039215688593685627, 0.0039215688593685627),
(0.4117647111415863, 0.047058824449777603, 0.047058824449777603),
(0.41596639156341553, 0.066666670143604279, 0.066666670143604279),
(0.42016807198524475, 0.090196080505847931, 0.090196080505847931),
(0.42436975240707397, 0.10980392247438431, 0.10980392247438431),
(0.4285714328289032, 0.12941177189350128, 0.12941177189350128),
(0.43277311325073242, 0.15294118225574493, 0.15294118225574493),
(0.43697479367256165, 0.17254902422428131, 0.17254902422428131),
(0.44117647409439087, 0.19215686619281769, 0.19215686619281769),
(0.44537815451622009, 0.21568627655506134, 0.21568627655506134),
(0.44957983493804932, 0.23529411852359772, 0.23529411852359772),
(0.45378151535987854, 0.25882354378700256, 0.25882354378700256),
(0.45798319578170776, 0.27843138575553894, 0.27843138575553894),
(0.46218487620353699, 0.29803922772407532, 0.29803922772407532),
(0.46638655662536621, 0.32156863808631897, 0.32156863808631897),
(0.47058823704719543, 0.34117648005485535, 0.34117648005485535),
(0.47478991746902466, 0.38431373238563538, 0.38431373238563538),
(0.47899159789085388, 0.40392157435417175, 0.40392157435417175),
(0.48319327831268311, 0.42745098471641541, 0.42745098471641541),
(0.48739495873451233, 0.44705882668495178, 0.44705882668495178),
(0.49159663915634155, 0.46666666865348816, 0.46666666865348816),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.50980395078659058, 0.50980395078659058), (0.50420171022415161,
0.52941179275512695, 0.52941179275512695), (0.50840336084365845,
0.55294120311737061, 0.55294120311737061), (0.51260507106781006,
0.57254904508590698, 0.57254904508590698), (0.51680672168731689,
0.59607845544815063, 0.59607845544815063), (0.52100843191146851,
0.61568629741668701, 0.61568629741668701), (0.52521008253097534,
0.63529413938522339, 0.63529413938522339), (0.52941179275512695,
0.65882354974746704, 0.65882354974746704), (0.53361344337463379,
0.67843139171600342, 0.67843139171600342), (0.5378151535987854,
0.72156864404678345, 0.72156864404678345), (0.54201680421829224,
0.74117648601531982, 0.74117648601531982), (0.54621851444244385,
0.76470589637756348, 0.76470589637756348), (0.55042016506195068,
0.78431373834609985, 0.78431373834609985), (0.55462187528610229,
0.80392158031463623, 0.80392158031463623), (0.55882352590560913,
0.82745099067687988, 0.82745099067687988), (0.56302523612976074,
0.84705883264541626, 0.84705883264541626), (0.56722688674926758,
0.87058824300765991, 0.87058824300765991), (0.57142859697341919,
0.89019608497619629, 0.89019608497619629), (0.57563024759292603,
0.90980392694473267, 0.90980392694473267), (0.57983195781707764,
0.93333333730697632, 0.93333333730697632), (0.58403360843658447,
0.9529411792755127, 0.9529411792755127), (0.58823531866073608,
0.97254902124404907, 0.97254902124404907), (0.59243696928024292,
0.99607843160629272, 0.99607843160629272), (0.59663867950439453, 1.0,
1.0), (0.60084033012390137, 1.0, 1.0), (0.60504204034805298, 1.0, 1.0),
(0.60924369096755981, 1.0, 1.0), (0.61344540119171143, 1.0, 1.0),
(0.61764705181121826, 1.0, 1.0), (0.62184876203536987, 1.0, 1.0),
(0.62605041265487671, 1.0, 1.0), (0.63025212287902832, 1.0, 1.0),
(0.63445377349853516, 1.0, 1.0), (0.63865548372268677, 1.0, 1.0),
(0.6428571343421936, 1.0, 1.0), (0.64705884456634521, 1.0, 1.0),
(0.65126049518585205, 1.0, 1.0), (0.65546220541000366, 1.0, 1.0),
(0.6596638560295105, 1.0, 1.0), (0.66386556625366211, 1.0, 1.0),
(0.66806721687316895, 1.0, 1.0), (0.67226892709732056, 1.0, 1.0),
(0.67647057771682739, 1.0, 1.0), (0.680672287940979, 1.0, 1.0),
(0.68487393856048584, 1.0, 1.0), (0.68907564878463745, 1.0, 1.0),
(0.69327729940414429, 1.0, 1.0), (0.6974790096282959, 1.0, 1.0),
(0.70168066024780273, 1.0, 1.0), (0.70588237047195435, 1.0, 1.0),
(0.71008402109146118, 1.0, 1.0), (0.71428573131561279, 1.0, 1.0),
(0.71848738193511963, 1.0, 1.0), (0.72268909215927124, 1.0, 1.0),
(0.72689074277877808, 1.0, 1.0), (0.73109245300292969, 1.0, 1.0),
(0.73529410362243652, 1.0, 1.0), (0.73949581384658813, 1.0, 1.0),
(0.74369746446609497, 1.0, 1.0), (0.74789917469024658, 1.0, 1.0),
(0.75210082530975342, 1.0, 1.0), (0.75630253553390503, 1.0, 1.0),
(0.76050418615341187, 1.0, 1.0), (0.76470589637756348, 1.0, 1.0),
(0.76890754699707031, 1.0, 1.0), (0.77310925722122192, 1.0, 1.0),
(0.77731090784072876, 1.0, 1.0), (0.78151261806488037, 1.0, 1.0),
(0.78571426868438721, 1.0, 1.0), (0.78991597890853882, 1.0, 1.0),
(0.79411762952804565, 1.0, 1.0), (0.79831933975219727, 1.0, 1.0),
(0.8025209903717041, 1.0, 1.0), (0.80672270059585571, 1.0, 1.0),
(0.81092435121536255, 1.0, 1.0), (0.81512606143951416, 1.0, 1.0),
(0.819327712059021, 1.0, 1.0), (0.82352942228317261, 1.0, 1.0),
(0.82773107290267944, 1.0, 1.0), (0.83193278312683105, 1.0, 1.0),
(0.83613443374633789, 1.0, 1.0), (0.8403361439704895, 1.0, 1.0),
(0.84453779458999634, 1.0, 1.0), (0.84873950481414795, 1.0, 1.0),
(0.85294115543365479, 1.0, 1.0), (0.8571428656578064, 1.0, 1.0),
(0.86134451627731323, 1.0, 1.0), (0.86554622650146484, 1.0, 1.0),
(0.86974787712097168, 1.0, 1.0), (0.87394958734512329, 1.0, 1.0),
(0.87815123796463013, 1.0, 1.0), (0.88235294818878174, 1.0, 1.0),
(0.88655459880828857, 1.0, 1.0), (0.89075630903244019, 1.0, 1.0),
(0.89495795965194702, 1.0, 1.0), (0.89915966987609863, 1.0, 1.0),
(0.90336132049560547, 1.0, 1.0), (0.90756303071975708, 1.0, 1.0),
(0.91176468133926392, 1.0, 1.0), (0.91596639156341553, 1.0, 1.0),
(0.92016804218292236, 1.0, 1.0), (0.92436975240707397, 1.0, 1.0),
(0.92857140302658081, 1.0, 1.0), (0.93277311325073242, 1.0, 1.0),
(0.93697476387023926, 1.0, 1.0), (0.94117647409439087, 1.0, 1.0),
(0.94537812471389771, 1.0, 1.0), (0.94957983493804932, 1.0, 1.0),
(0.95378148555755615, 1.0, 1.0), (0.95798319578170776, 1.0, 1.0),
(0.9621848464012146, 1.0, 1.0), (0.96638655662536621, 0.99607843160629272,
0.99607843160629272), (0.97058820724487305, 0.97647058963775635,
0.97647058963775635), (0.97478991746902466, 0.9529411792755127,
0.9529411792755127), (0.97899156808853149, 0.91372549533843994,
0.91372549533843994), (0.98319327831268311, 0.89019608497619629,
0.89019608497619629), (0.98739492893218994, 0.87058824300765991,
0.87058824300765991), (0.99159663915634155, 0.85098040103912354,
0.85098040103912354), (0.99579828977584839, 0.82745099067687988,
0.82745099067687988), (1.0, 0.80784314870834351, 0.80784314870834351)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.019607843831181526, 0.019607843831181526),
(0.037815127521753311, 0.043137256056070328, 0.043137256056070328),
(0.042016807943582535, 0.062745101749897003, 0.062745101749897003),
(0.046218488365411758, 0.086274512112140656, 0.086274512112140656),
(0.050420168787240982, 0.10588235408067703, 0.10588235408067703),
(0.054621849209070206, 0.12549020349979401, 0.12549020349979401),
(0.058823529630899429, 0.14901961386203766, 0.14901961386203766),
(0.063025213778018951, 0.16862745583057404, 0.16862745583057404),
(0.067226894199848175, 0.18823529779911041, 0.18823529779911041),
(0.071428574621677399, 0.21176470816135406, 0.21176470816135406),
(0.075630255043506622, 0.23137255012989044, 0.23137255012989044),
(0.079831935465335846, 0.25490197539329529, 0.25490197539329529),
(0.08403361588716507, 0.27450981736183167, 0.27450981736183167),
(0.088235296308994293, 0.29411765933036804, 0.29411765933036804),
(0.092436976730823517, 0.31764706969261169, 0.31764706969261169),
(0.09663865715265274, 0.35686275362968445, 0.35686275362968445),
(0.10084033757448196, 0.3803921639919281, 0.3803921639919281),
(0.10504201799631119, 0.40000000596046448, 0.40000000596046448),
(0.10924369841814041, 0.42352941632270813, 0.42352941632270813),
(0.11344537883996964, 0.44313725829124451, 0.44313725829124451),
(0.11764705926179886, 0.46274510025978088, 0.46274510025978088),
(0.12184873968362808, 0.48627451062202454, 0.48627451062202454),
(0.1260504275560379, 0.5058823823928833, 0.5058823823928833),
(0.13025210797786713, 0.52941179275512695, 0.52941179275512695),
(0.13445378839969635, 0.54901963472366333, 0.54901963472366333),
(0.13865546882152557, 0.56862747669219971, 0.56862747669219971),
(0.1428571492433548, 0.59215688705444336, 0.59215688705444336),
(0.14705882966518402, 0.61176472902297974, 0.61176472902297974),
(0.15126051008701324, 0.63137257099151611, 0.63137257099151611),
(0.15546219050884247, 0.65490198135375977, 0.65490198135375977),
(0.15966387093067169, 0.69803923368453979, 0.69803923368453979),
(0.16386555135250092, 0.71764707565307617, 0.71764707565307617),
(0.16806723177433014, 0.73725491762161255, 0.73725491762161255),
(0.17226891219615936, 0.7607843279838562, 0.7607843279838562),
(0.17647059261798859, 0.78039216995239258, 0.78039216995239258),
(0.18067227303981781, 0.80000001192092896, 0.80000001192092896),
(0.18487395346164703, 0.82352942228317261, 0.82352942228317261),
(0.18907563388347626, 0.84313726425170898, 0.84313726425170898),
(0.19327731430530548, 0.86666667461395264, 0.86666667461395264),
(0.1974789947271347, 0.88627451658248901, 0.88627451658248901),
(0.20168067514896393, 0.90588235855102539, 0.90588235855102539),
(0.20588235557079315, 0.92941176891326904, 0.92941176891326904),
(0.21008403599262238, 0.94901961088180542, 0.94901961088180542),
(0.2142857164144516, 0.9686274528503418, 0.9686274528503418),
(0.21848739683628082, 0.99215686321258545, 0.99215686321258545),
(0.22268907725811005, 1.0, 1.0), (0.22689075767993927, 1.0, 1.0),
(0.23109243810176849, 1.0, 1.0), (0.23529411852359772, 1.0, 1.0),
(0.23949579894542694, 1.0, 1.0), (0.24369747936725616, 1.0, 1.0),
(0.24789915978908539, 1.0, 1.0), (0.25210085511207581, 1.0, 1.0),
(0.25630253553390503, 1.0, 1.0), (0.26050421595573425, 1.0, 1.0),
(0.26470589637756348, 1.0, 1.0), (0.2689075767993927, 1.0, 1.0),
(0.27310925722122192, 1.0, 1.0), (0.27731093764305115, 1.0, 1.0),
(0.28151261806488037, 1.0, 1.0), (0.28571429848670959, 1.0, 1.0),
(0.28991597890853882, 1.0, 1.0), (0.29411765933036804, 1.0, 1.0),
(0.29831933975219727, 1.0, 1.0), (0.30252102017402649, 1.0, 1.0),
(0.30672270059585571, 1.0, 1.0), (0.31092438101768494, 1.0, 1.0),
(0.31512606143951416, 1.0, 1.0), (0.31932774186134338, 1.0, 1.0),
(0.32352942228317261, 1.0, 1.0), (0.32773110270500183, 1.0, 1.0),
(0.33193278312683105, 1.0, 1.0), (0.33613446354866028, 1.0, 1.0),
(0.3403361439704895, 1.0, 1.0), (0.34453782439231873, 1.0, 1.0),
(0.34873950481414795, 1.0, 1.0), (0.35294118523597717, 1.0, 1.0),
(0.3571428656578064, 1.0, 1.0), (0.36134454607963562, 1.0, 1.0),
(0.36554622650146484, 1.0, 1.0), (0.36974790692329407, 1.0, 1.0),
(0.37394958734512329, 1.0, 1.0), (0.37815126776695251, 1.0, 1.0),
(0.38235294818878174, 1.0, 1.0), (0.38655462861061096, 1.0, 1.0),
(0.39075630903244019, 1.0, 1.0), (0.39495798945426941, 1.0, 1.0),
(0.39915966987609863, 1.0, 1.0), (0.40336135029792786, 1.0, 1.0),
(0.40756303071975708, 1.0, 1.0), (0.4117647111415863, 1.0, 1.0),
(0.41596639156341553, 1.0, 1.0), (0.42016807198524475, 1.0, 1.0),
(0.42436975240707397, 1.0, 1.0), (0.4285714328289032, 1.0, 1.0),
(0.43277311325073242, 1.0, 1.0), (0.43697479367256165, 1.0, 1.0),
(0.44117647409439087, 1.0, 1.0), (0.44537815451622009, 1.0, 1.0),
(0.44957983493804932, 1.0, 1.0), (0.45378151535987854, 1.0, 1.0),
(0.45798319578170776, 1.0, 1.0), (0.46218487620353699, 1.0, 1.0),
(0.46638655662536621, 1.0, 1.0), (0.47058823704719543, 1.0, 1.0),
(0.47478991746902466, 1.0, 1.0), (0.47899159789085388, 1.0, 1.0),
(0.48319327831268311, 1.0, 1.0), (0.48739495873451233, 1.0, 1.0),
(0.49159663915634155, 1.0, 1.0), (0.49579831957817078, 1.0, 1.0), (0.5,
1.0, 1.0), (0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 1.0,
1.0), (0.51260507106781006, 1.0, 1.0), (0.51680672168731689, 1.0, 1.0),
(0.52100843191146851, 1.0, 1.0), (0.52521008253097534, 1.0, 1.0),
(0.52941179275512695, 1.0, 1.0), (0.53361344337463379, 1.0, 1.0),
(0.5378151535987854, 1.0, 1.0), (0.54201680421829224, 1.0, 1.0),
(0.54621851444244385, 1.0, 1.0), (0.55042016506195068, 1.0, 1.0),
(0.55462187528610229, 1.0, 1.0), (0.55882352590560913, 1.0, 1.0),
(0.56302523612976074, 1.0, 1.0), (0.56722688674926758, 1.0, 1.0),
(0.57142859697341919, 1.0, 1.0), (0.57563024759292603, 1.0, 1.0),
(0.57983195781707764, 1.0, 1.0), (0.58403360843658447, 1.0, 1.0),
(0.58823531866073608, 1.0, 1.0), (0.59243696928024292, 1.0, 1.0),
(0.59663867950439453, 0.98039215803146362, 0.98039215803146362),
(0.60084033012390137, 0.93725490570068359, 0.93725490570068359),
(0.60504204034805298, 0.91764706373214722, 0.91764706373214722),
(0.60924369096755981, 0.89411765336990356, 0.89411765336990356),
(0.61344540119171143, 0.87450981140136719, 0.87450981140136719),
(0.61764705181121826, 0.85490196943283081, 0.85490196943283081),
(0.62184876203536987, 0.83137255907058716, 0.83137255907058716),
(0.62605041265487671, 0.81176471710205078, 0.81176471710205078),
(0.63025212287902832, 0.78823530673980713, 0.78823530673980713),
(0.63445377349853516, 0.76862746477127075, 0.76862746477127075),
(0.63865548372268677, 0.74901962280273438, 0.74901962280273438),
(0.6428571343421936, 0.72549021244049072, 0.72549021244049072),
(0.64705884456634521, 0.70588237047195435, 0.70588237047195435),
(0.65126049518585205, 0.68235296010971069, 0.68235296010971069),
(0.65546220541000366, 0.66274511814117432, 0.66274511814117432),
(0.6596638560295105, 0.64313727617263794, 0.64313727617263794),
(0.66386556625366211, 0.60000002384185791, 0.60000002384185791),
(0.66806721687316895, 0.58039218187332153, 0.58039218187332153),
(0.67226892709732056, 0.55686277151107788, 0.55686277151107788),
(0.67647057771682739, 0.5372549295425415, 0.5372549295425415),
(0.680672287940979, 0.51372551918029785, 0.51372551918029785),
(0.68487393856048584, 0.49411764740943909, 0.49411764740943909),
(0.68907564878463745, 0.47450980544090271, 0.47450980544090271),
(0.69327729940414429, 0.45098039507865906, 0.45098039507865906),
(0.6974790096282959, 0.43137255311012268, 0.43137255311012268),
(0.70168066024780273, 0.4117647111415863, 0.4117647111415863),
(0.70588237047195435, 0.38823530077934265, 0.38823530077934265),
(0.71008402109146118, 0.36862745881080627, 0.36862745881080627),
(0.71428573131561279, 0.34509804844856262, 0.34509804844856262),
(0.71848738193511963, 0.32549020648002625, 0.32549020648002625),
(0.72268909215927124, 0.30588236451148987, 0.30588236451148987),
(0.72689074277877808, 0.26274511218070984, 0.26274511218070984),
(0.73109245300292969, 0.24313725531101227, 0.24313725531101227),
(0.73529410362243652, 0.21960784494876862, 0.21960784494876862),
(0.73949581384658813, 0.20000000298023224, 0.20000000298023224),
(0.74369746446609497, 0.17647059261798859, 0.17647059261798859),
(0.74789917469024658, 0.15686275064945221, 0.15686275064945221),
(0.75210082530975342, 0.13725490868091583, 0.13725490868091583),
(0.75630253553390503, 0.11372549086809158, 0.11372549086809158),
(0.76050418615341187, 0.094117648899555206, 0.094117648899555206),
(0.76470589637756348, 0.070588238537311554, 0.070588238537311554),
(0.76890754699707031, 0.050980392843484879, 0.050980392843484879),
(0.77310925722122192, 0.031372550874948502, 0.031372550874948502),
(0.77731090784072876, 0.0078431377187371254, 0.0078431377187371254),
(0.78151261806488037, 0.0, 0.0), (0.78571426868438721, 0.0, 0.0),
(0.78991597890853882, 0.0, 0.0), (0.79411762952804565, 0.0, 0.0),
(0.79831933975219727, 0.0, 0.0), (0.8025209903717041, 0.0, 0.0),
(0.80672270059585571, 0.0, 0.0), (0.81092435121536255, 0.0, 0.0),
(0.81512606143951416, 0.0, 0.0), (0.819327712059021, 0.0, 0.0),
(0.82352942228317261, 0.0, 0.0), (0.82773107290267944, 0.0, 0.0),
(0.83193278312683105, 0.0, 0.0), (0.83613443374633789, 0.0, 0.0),
(0.8403361439704895, 0.0, 0.0), (0.84453779458999634, 0.0, 0.0),
(0.84873950481414795, 0.0, 0.0), (0.85294115543365479, 0.0, 0.0),
(0.8571428656578064, 0.0, 0.0), (0.86134451627731323, 0.0, 0.0),
(0.86554622650146484, 0.0, 0.0), (0.86974787712097168, 0.0, 0.0),
(0.87394958734512329, 0.0, 0.0), (0.87815123796463013, 0.0, 0.0),
(0.88235294818878174, 0.0, 0.0), (0.88655459880828857, 0.0, 0.0),
(0.89075630903244019, 0.0, 0.0), (0.89495795965194702, 0.0, 0.0),
(0.89915966987609863, 0.0, 0.0), (0.90336132049560547, 0.0, 0.0),
(0.90756303071975708, 0.0, 0.0), (0.91176468133926392, 0.0, 0.0),
(0.91596639156341553, 0.0, 0.0), (0.92016804218292236, 0.0, 0.0),
(0.92436975240707397, 0.0, 0.0), (0.92857140302658081, 0.0, 0.0),
(0.93277311325073242, 0.0, 0.0), (0.93697476387023926, 0.0, 0.0),
(0.94117647409439087, 0.0, 0.0), (0.94537812471389771, 0.0, 0.0),
(0.94957983493804932, 0.0, 0.0), (0.95378148555755615, 0.0, 0.0),
(0.95798319578170776, 0.0, 0.0), (0.9621848464012146, 0.0, 0.0),
(0.96638655662536621, 0.0, 0.0), (0.97058820724487305, 0.0, 0.0),
(0.97478991746902466, 0.0, 0.0), (0.97899156808853149, 0.0, 0.0),
(0.98319327831268311, 0.0, 0.0), (0.98739492893218994, 0.0, 0.0),
(0.99159663915634155, 0.0, 0.0), (0.99579828977584839, 0.0, 0.0), (1.0,
0.0, 0.0)], 'red': [(0.0, 1.0, 1.0), (0.0042016808874905109, 1.0, 1.0),
(0.0084033617749810219, 1.0, 1.0), (0.012605042196810246, 1.0, 1.0),
(0.016806723549962044, 1.0, 1.0), (0.021008403971791267, 1.0, 1.0),
(0.025210084393620491, 1.0, 1.0), (0.029411764815449715, 1.0, 1.0),
(0.033613447099924088, 1.0, 1.0), (0.037815127521753311, 1.0, 1.0),
(0.042016807943582535, 1.0, 1.0), (0.046218488365411758, 1.0, 1.0),
(0.050420168787240982, 1.0, 1.0), (0.054621849209070206, 1.0, 1.0),
(0.058823529630899429, 1.0, 1.0), (0.063025213778018951, 1.0, 1.0),
(0.067226894199848175, 1.0, 1.0), (0.071428574621677399, 1.0, 1.0),
(0.075630255043506622, 1.0, 1.0), (0.079831935465335846, 1.0, 1.0),
(0.08403361588716507, 1.0, 1.0), (0.088235296308994293, 1.0, 1.0),
(0.092436976730823517, 1.0, 1.0), (0.09663865715265274, 1.0, 1.0),
(0.10084033757448196, 1.0, 1.0), (0.10504201799631119, 1.0, 1.0),
(0.10924369841814041, 1.0, 1.0), (0.11344537883996964, 1.0, 1.0),
(0.11764705926179886, 1.0, 1.0), (0.12184873968362808, 1.0, 1.0),
(0.1260504275560379, 1.0, 1.0), (0.13025210797786713, 1.0, 1.0),
(0.13445378839969635, 1.0, 1.0), (0.13865546882152557, 1.0, 1.0),
(0.1428571492433548, 1.0, 1.0), (0.14705882966518402, 1.0, 1.0),
(0.15126051008701324, 1.0, 1.0), (0.15546219050884247, 1.0, 1.0),
(0.15966387093067169, 1.0, 1.0), (0.16386555135250092, 1.0, 1.0),
(0.16806723177433014, 1.0, 1.0), (0.17226891219615936, 1.0, 1.0),
(0.17647059261798859, 1.0, 1.0), (0.18067227303981781, 1.0, 1.0),
(0.18487395346164703, 1.0, 1.0), (0.18907563388347626, 1.0, 1.0),
(0.19327731430530548, 1.0, 1.0), (0.1974789947271347, 1.0, 1.0),
(0.20168067514896393, 1.0, 1.0), (0.20588235557079315, 1.0, 1.0),
(0.21008403599262238, 1.0, 1.0), (0.2142857164144516, 1.0, 1.0),
(0.21848739683628082, 1.0, 1.0), (0.22268907725811005,
0.96078431606292725, 0.96078431606292725), (0.22689075767993927,
0.94117647409439087, 0.94117647409439087), (0.23109243810176849,
0.92156863212585449, 0.92156863212585449), (0.23529411852359772,
0.89803922176361084, 0.89803922176361084), (0.23949579894542694,
0.87843137979507446, 0.87843137979507446), (0.24369747936725616,
0.85882353782653809, 0.85882353782653809), (0.24789915978908539,
0.83529412746429443, 0.83529412746429443), (0.25210085511207581,
0.81568628549575806, 0.81568628549575806), (0.25630253553390503,
0.7921568751335144, 0.7921568751335144), (0.26050421595573425,
0.77254903316497803, 0.77254903316497803), (0.26470589637756348,
0.75294119119644165, 0.75294119119644165), (0.2689075767993927,
0.729411780834198, 0.729411780834198), (0.27310925722122192,
0.70980393886566162, 0.70980393886566162), (0.27731093764305115,
0.68627452850341797, 0.68627452850341797), (0.28151261806488037,
0.66666668653488159, 0.66666668653488159), (0.28571429848670959,
0.62352943420410156, 0.62352943420410156), (0.28991597890853882,
0.60392159223556519, 0.60392159223556519), (0.29411765933036804,
0.58431375026702881, 0.58431375026702881), (0.29831933975219727,
0.56078433990478516, 0.56078433990478516), (0.30252102017402649,
0.54117649793624878, 0.54117649793624878), (0.30672270059585571,
0.51764708757400513, 0.51764708757400513), (0.31092438101768494,
0.49803921580314636, 0.49803921580314636), (0.31512606143951416,
0.47843137383460999, 0.47843137383460999), (0.31932774186134338,
0.45490196347236633, 0.45490196347236633), (0.32352942228317261,
0.43529412150382996, 0.43529412150382996), (0.32773110270500183,
0.41568627953529358, 0.41568627953529358), (0.33193278312683105,
0.39215686917304993, 0.39215686917304993), (0.33613446354866028,
0.37254902720451355, 0.37254902720451355), (0.3403361439704895,
0.3490196168422699, 0.3490196168422699), (0.34453782439231873,
0.32941177487373352, 0.32941177487373352), (0.34873950481414795,
0.28627452254295349, 0.28627452254295349), (0.35294118523597717,
0.26666668057441711, 0.26666668057441711), (0.3571428656578064,
0.24705882370471954, 0.24705882370471954), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.20392157137393951, 0.20392157137393951), (0.36974790692329407,
0.18039216101169586, 0.18039216101169586), (0.37394958734512329,
0.16078431904315948, 0.16078431904315948), (0.37815126776695251,
0.14117647707462311, 0.14117647707462311), (0.38235294818878174,
0.11764705926179886, 0.11764705926179886), (0.38655462861061096,
0.098039217293262482, 0.098039217293262482), (0.39075630903244019,
0.074509806931018829, 0.074509806931018829), (0.39495798945426941,
0.054901961237192154, 0.054901961237192154), (0.39915966987609863,
0.035294119268655777, 0.035294119268655777), (0.40336135029792786,
0.011764706112444401, 0.011764706112444401), (0.40756303071975708, 0.0,
0.0), (0.4117647111415863, 0.0, 0.0), (0.41596639156341553, 0.0, 0.0),
(0.42016807198524475, 0.0, 0.0), (0.42436975240707397, 0.0, 0.0),
(0.4285714328289032, 0.0, 0.0), (0.43277311325073242, 0.0, 0.0),
(0.43697479367256165, 0.0, 0.0), (0.44117647409439087, 0.0, 0.0),
(0.44537815451622009, 0.0, 0.0), (0.44957983493804932, 0.0, 0.0),
(0.45378151535987854, 0.0, 0.0), (0.45798319578170776, 0.0, 0.0),
(0.46218487620353699, 0.0, 0.0), (0.46638655662536621, 0.0, 0.0),
(0.47058823704719543, 0.0, 0.0), (0.47478991746902466, 0.0, 0.0),
(0.47899159789085388, 0.0, 0.0), (0.48319327831268311, 0.0, 0.0),
(0.48739495873451233, 0.0, 0.0), (0.49159663915634155, 0.0, 0.0),
(0.49579831957817078, 0.0, 0.0), (0.5, 0.0, 0.0), (0.50420171022415161,
0.0, 0.0), (0.50840336084365845, 0.0, 0.0), (0.51260507106781006, 0.0,
0.0), (0.51680672168731689, 0.0, 0.0), (0.52100843191146851, 0.0, 0.0),
(0.52521008253097534, 0.0, 0.0), (0.52941179275512695, 0.0, 0.0),
(0.53361344337463379, 0.0, 0.0), (0.5378151535987854, 0.0, 0.0),
(0.54201680421829224, 0.0, 0.0), (0.54621851444244385, 0.0, 0.0),
(0.55042016506195068, 0.0, 0.0), (0.55462187528610229, 0.0, 0.0),
(0.55882352590560913, 0.0, 0.0), (0.56302523612976074, 0.0, 0.0),
(0.56722688674926758, 0.0, 0.0), (0.57142859697341919, 0.0, 0.0),
(0.57563024759292603, 0.0, 0.0), (0.57983195781707764, 0.0, 0.0),
(0.58403360843658447, 0.0, 0.0), (0.58823531866073608, 0.0, 0.0),
(0.59243696928024292, 0.0, 0.0), (0.59663867950439453, 0.0, 0.0),
(0.60084033012390137, 0.0, 0.0), (0.60504204034805298, 0.0, 0.0),
(0.60924369096755981, 0.0, 0.0), (0.61344540119171143, 0.0, 0.0),
(0.61764705181121826, 0.0, 0.0), (0.62184876203536987, 0.0, 0.0),
(0.62605041265487671, 0.0, 0.0), (0.63025212287902832, 0.0, 0.0),
(0.63445377349853516, 0.0, 0.0), (0.63865548372268677, 0.0, 0.0),
(0.6428571343421936, 0.0, 0.0), (0.64705884456634521, 0.0, 0.0),
(0.65126049518585205, 0.0, 0.0), (0.65546220541000366, 0.0, 0.0),
(0.6596638560295105, 0.0, 0.0), (0.66386556625366211, 0.0, 0.0),
(0.66806721687316895, 0.0, 0.0), (0.67226892709732056, 0.0, 0.0),
(0.67647057771682739, 0.0, 0.0), (0.680672287940979, 0.0, 0.0),
(0.68487393856048584, 0.0, 0.0), (0.68907564878463745, 0.0, 0.0),
(0.69327729940414429, 0.0, 0.0), (0.6974790096282959, 0.0, 0.0),
(0.70168066024780273, 0.0, 0.0), (0.70588237047195435, 0.0, 0.0),
(0.71008402109146118, 0.0, 0.0), (0.71428573131561279, 0.0, 0.0),
(0.71848738193511963, 0.0, 0.0), (0.72268909215927124, 0.0, 0.0),
(0.72689074277877808, 0.0, 0.0), (0.73109245300292969, 0.0, 0.0),
(0.73529410362243652, 0.0, 0.0), (0.73949581384658813, 0.0, 0.0),
(0.74369746446609497, 0.0, 0.0), (0.74789917469024658, 0.0, 0.0),
(0.75210082530975342, 0.0, 0.0), (0.75630253553390503, 0.0, 0.0),
(0.76050418615341187, 0.0, 0.0), (0.76470589637756348, 0.0, 0.0),
(0.76890754699707031, 0.0, 0.0), (0.77310925722122192, 0.0, 0.0),
(0.77731090784072876, 0.0, 0.0), (0.78151261806488037,
0.0078431377187371254, 0.0078431377187371254), (0.78571426868438721,
0.027450980618596077, 0.027450980618596077), (0.78991597890853882,
0.070588238537311554, 0.070588238537311554), (0.79411762952804565,
0.094117648899555206, 0.094117648899555206), (0.79831933975219727,
0.11372549086809158, 0.11372549086809158), (0.8025209903717041,
0.13333334028720856, 0.13333334028720856), (0.80672270059585571,
0.15686275064945221, 0.15686275064945221), (0.81092435121536255,
0.17647059261798859, 0.17647059261798859), (0.81512606143951416,
0.19607843458652496, 0.19607843458652496), (0.819327712059021,
0.21960784494876862, 0.21960784494876862), (0.82352942228317261,
0.23921568691730499, 0.23921568691730499), (0.82773107290267944,
0.26274511218070984, 0.26274511218070984), (0.83193278312683105,
0.28235295414924622, 0.28235295414924622), (0.83613443374633789,
0.30196079611778259, 0.30196079611778259), (0.8403361439704895,
0.32549020648002625, 0.32549020648002625), (0.84453779458999634,
0.34509804844856262, 0.34509804844856262), (0.84873950481414795,
0.364705890417099, 0.364705890417099), (0.85294115543365479,
0.40784314274787903, 0.40784314274787903), (0.8571428656578064,
0.43137255311012268, 0.43137255311012268), (0.86134451627731323,
0.45098039507865906, 0.45098039507865906), (0.86554622650146484,
0.47058823704719543, 0.47058823704719543), (0.86974787712097168,
0.49411764740943909, 0.49411764740943909), (0.87394958734512329,
0.51372551918029785, 0.51372551918029785), (0.87815123796463013,
0.53333336114883423, 0.53333336114883423), (0.88235294818878174,
0.55686277151107788, 0.55686277151107788), (0.88655459880828857,
0.57647061347961426, 0.57647061347961426), (0.89075630903244019,
0.60000002384185791, 0.60000002384185791), (0.89495795965194702,
0.61960786581039429, 0.61960786581039429), (0.89915966987609863,
0.63921570777893066, 0.63921570777893066), (0.90336132049560547,
0.66274511814117432, 0.66274511814117432), (0.90756303071975708,
0.68235296010971069, 0.68235296010971069), (0.91176468133926392,
0.70588237047195435, 0.70588237047195435), (0.91596639156341553,
0.7450980544090271, 0.7450980544090271), (0.92016804218292236,
0.76862746477127075, 0.76862746477127075), (0.92436975240707397,
0.78823530673980713, 0.78823530673980713), (0.92857140302658081,
0.80784314870834351, 0.80784314870834351), (0.93277311325073242,
0.83137255907058716, 0.83137255907058716), (0.93697476387023926,
0.85098040103912354, 0.85098040103912354), (0.94117647409439087,
0.87450981140136719, 0.87450981140136719), (0.94537812471389771,
0.89411765336990356, 0.89411765336990356), (0.94957983493804932,
0.91372549533843994, 0.91372549533843994), (0.95378148555755615,
0.93725490570068359, 0.93725490570068359), (0.95798319578170776,
0.95686274766921997, 0.95686274766921997), (0.9621848464012146,
0.97647058963775635, 0.97647058963775635), (0.96638655662536621, 1.0,
1.0), (0.97058820724487305, 1.0, 1.0), (0.97478991746902466, 1.0, 1.0),
(0.97899156808853149, 1.0, 1.0), (0.98319327831268311, 1.0, 1.0),
(0.98739492893218994, 1.0, 1.0), (0.99159663915634155, 1.0, 1.0),
(0.99579828977584839, 1.0, 1.0), (1.0, 1.0, 1.0)]}
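# --- Illustrative note added by the editor (hedged sketch; `demo_rainbow` and
# the 'gist_rainbow_demo' name are hypothetical, not necessarily how the
# surrounding module registers this table). The _gist_rainbow_data dictionary
# defined above can be consumed in the usual way: build a
# LinearSegmentedColormap from it and sample it to obtain RGBA values.
from matplotlib.colors import LinearSegmentedColormap

demo_rainbow = LinearSegmentedColormap('gist_rainbow_demo', _gist_rainbow_data,
                                       N=256)
# Sampling returns interpolated RGBA tuples: demo_rainbow(0.0) is red-dominated
# (the red channel starts at 1.0), while demo_rainbow(1.0) trends toward
# magenta/pink, following the anchors listed above.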
_gist_stern_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.011764706112444401,
0.011764706112444401), (0.012605042196810246, 0.019607843831181526,
0.019607843831181526), (0.016806723549962044, 0.027450980618596077,
0.027450980618596077), (0.021008403971791267, 0.035294119268655777,
0.035294119268655777), (0.025210084393620491, 0.043137256056070328,
0.043137256056070328), (0.029411764815449715, 0.050980392843484879,
0.050980392843484879), (0.033613447099924088, 0.058823529630899429,
0.058823529630899429), (0.037815127521753311, 0.066666670143604279,
0.066666670143604279), (0.042016807943582535, 0.08235294371843338,
0.08235294371843338), (0.046218488365411758, 0.090196080505847931,
0.090196080505847931), (0.050420168787240982, 0.098039217293262482,
0.098039217293262482), (0.054621849209070206, 0.10588235408067703,
0.10588235408067703), (0.058823529630899429, 0.11372549086809158,
0.11372549086809158), (0.063025213778018951, 0.12156862765550613,
0.12156862765550613), (0.067226894199848175, 0.12941177189350128,
0.12941177189350128), (0.071428574621677399, 0.13725490868091583,
0.13725490868091583), (0.075630255043506622, 0.14509804546833038,
0.14509804546833038), (0.079831935465335846, 0.15294118225574493,
0.15294118225574493), (0.08403361588716507, 0.16078431904315948,
0.16078431904315948), (0.088235296308994293, 0.16862745583057404,
0.16862745583057404), (0.092436976730823517, 0.17647059261798859,
0.17647059261798859), (0.09663865715265274, 0.18431372940540314,
0.18431372940540314), (0.10084033757448196, 0.19215686619281769,
0.19215686619281769), (0.10504201799631119, 0.20000000298023224,
0.20000000298023224), (0.10924369841814041, 0.20784313976764679,
0.20784313976764679), (0.11344537883996964, 0.21568627655506134,
0.21568627655506134), (0.11764705926179886, 0.22352941334247589,
0.22352941334247589), (0.12184873968362808, 0.23137255012989044,
0.23137255012989044), (0.1260504275560379, 0.24705882370471954,
0.24705882370471954), (0.13025210797786713, 0.25490197539329529,
0.25490197539329529), (0.13445378839969635, 0.26274511218070984,
0.26274511218070984), (0.13865546882152557, 0.27058824896812439,
0.27058824896812439), (0.1428571492433548, 0.27843138575553894,
0.27843138575553894), (0.14705882966518402, 0.28627452254295349,
0.28627452254295349), (0.15126051008701324, 0.29411765933036804,
0.29411765933036804), (0.15546219050884247, 0.30196079611778259,
0.30196079611778259), (0.15966387093067169, 0.30980393290519714,
0.30980393290519714), (0.16386555135250092, 0.31764706969261169,
0.31764706969261169), (0.16806723177433014, 0.32549020648002625,
0.32549020648002625), (0.17226891219615936, 0.3333333432674408,
0.3333333432674408), (0.17647059261798859, 0.34117648005485535,
0.34117648005485535), (0.18067227303981781, 0.3490196168422699,
0.3490196168422699), (0.18487395346164703, 0.35686275362968445,
0.35686275362968445), (0.18907563388347626, 0.364705890417099,
0.364705890417099), (0.19327731430530548, 0.37254902720451355,
0.37254902720451355), (0.1974789947271347, 0.3803921639919281,
0.3803921639919281), (0.20168067514896393, 0.38823530077934265,
0.38823530077934265), (0.20588235557079315, 0.3960784375667572,
0.3960784375667572), (0.21008403599262238, 0.4117647111415863,
0.4117647111415863), (0.2142857164144516, 0.41960784792900085,
0.41960784792900085), (0.21848739683628082, 0.42745098471641541,
0.42745098471641541), (0.22268907725811005, 0.43529412150382996,
0.43529412150382996), (0.22689075767993927, 0.44313725829124451,
0.44313725829124451), (0.23109243810176849, 0.45098039507865906,
0.45098039507865906), (0.23529411852359772, 0.45882353186607361,
0.45882353186607361), (0.23949579894542694, 0.46666666865348816,
0.46666666865348816), (0.24369747936725616, 0.47450980544090271,
0.47450980544090271), (0.24789915978908539, 0.48235294222831726,
0.48235294222831726), (0.25210085511207581, 0.49803921580314636,
0.49803921580314636), (0.25630253553390503, 0.5058823823928833,
0.5058823823928833), (0.26050421595573425, 0.51372551918029785,
0.51372551918029785), (0.26470589637756348, 0.5215686559677124,
0.5215686559677124), (0.2689075767993927, 0.52941179275512695,
0.52941179275512695), (0.27310925722122192, 0.5372549295425415,
0.5372549295425415), (0.27731093764305115, 0.54509806632995605,
0.54509806632995605), (0.28151261806488037, 0.55294120311737061,
0.55294120311737061), (0.28571429848670959, 0.56078433990478516,
0.56078433990478516), (0.28991597890853882, 0.56862747669219971,
0.56862747669219971), (0.29411765933036804, 0.58431375026702881,
0.58431375026702881), (0.29831933975219727, 0.59215688705444336,
0.59215688705444336), (0.30252102017402649, 0.60000002384185791,
0.60000002384185791), (0.30672270059585571, 0.60784316062927246,
0.60784316062927246), (0.31092438101768494, 0.61568629741668701,
0.61568629741668701), (0.31512606143951416, 0.62352943420410156,
0.62352943420410156), (0.31932774186134338, 0.63137257099151611,
0.63137257099151611), (0.32352942228317261, 0.63921570777893066,
0.63921570777893066), (0.32773110270500183, 0.64705884456634521,
0.64705884456634521), (0.33193278312683105, 0.65490198135375977,
0.65490198135375977), (0.33613446354866028, 0.66274511814117432,
0.66274511814117432), (0.3403361439704895, 0.67058825492858887,
0.67058825492858887), (0.34453782439231873, 0.67843139171600342,
0.67843139171600342), (0.34873950481414795, 0.68627452850341797,
0.68627452850341797), (0.35294118523597717, 0.69411766529083252,
0.69411766529083252), (0.3571428656578064, 0.70196080207824707,
0.70196080207824707), (0.36134454607963562, 0.70980393886566162,
0.70980393886566162), (0.36554622650146484, 0.71764707565307617,
0.71764707565307617), (0.36974790692329407, 0.72549021244049072,
0.72549021244049072), (0.37394958734512329, 0.73333334922790527,
0.73333334922790527), (0.37815126776695251, 0.74901962280273438,
0.74901962280273438), (0.38235294818878174, 0.75686275959014893,
0.75686275959014893), (0.38655462861061096, 0.76470589637756348,
0.76470589637756348), (0.39075630903244019, 0.77254903316497803,
0.77254903316497803), (0.39495798945426941, 0.78039216995239258,
0.78039216995239258), (0.39915966987609863, 0.78823530673980713,
0.78823530673980713), (0.40336135029792786, 0.79607844352722168,
0.79607844352722168), (0.40756303071975708, 0.80392158031463623,
0.80392158031463623), (0.4117647111415863, 0.81176471710205078,
0.81176471710205078), (0.41596639156341553, 0.81960785388946533,
0.81960785388946533), (0.42016807198524475, 0.82745099067687988,
0.82745099067687988), (0.42436975240707397, 0.83529412746429443,
0.83529412746429443), (0.4285714328289032, 0.84313726425170898,
0.84313726425170898), (0.43277311325073242, 0.85098040103912354,
0.85098040103912354), (0.43697479367256165, 0.85882353782653809,
0.85882353782653809), (0.44117647409439087, 0.86666667461395264,
0.86666667461395264), (0.44537815451622009, 0.87450981140136719,
0.87450981140136719), (0.44957983493804932, 0.88235294818878174,
0.88235294818878174), (0.45378151535987854, 0.89019608497619629,
0.89019608497619629), (0.45798319578170776, 0.89803922176361084,
0.89803922176361084), (0.46218487620353699, 0.91372549533843994,
0.91372549533843994), (0.46638655662536621, 0.92156863212585449,
0.92156863212585449), (0.47058823704719543, 0.92941176891326904,
0.92941176891326904), (0.47478991746902466, 0.93725490570068359,
0.93725490570068359), (0.47899159789085388, 0.94509804248809814,
0.94509804248809814), (0.48319327831268311, 0.9529411792755127,
0.9529411792755127), (0.48739495873451233, 0.96078431606292725,
0.96078431606292725), (0.49159663915634155, 0.9686274528503418,
0.9686274528503418), (0.49579831957817078, 0.97647058963775635,
0.97647058963775635), (0.5, 0.9843137264251709, 0.9843137264251709),
(0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 0.9843137264251709,
0.9843137264251709), (0.51260507106781006, 0.9686274528503418,
0.9686274528503418), (0.51680672168731689, 0.9529411792755127,
0.9529411792755127), (0.52100843191146851, 0.93333333730697632,
0.93333333730697632), (0.52521008253097534, 0.91764706373214722,
0.91764706373214722), (0.52941179275512695, 0.90196079015731812,
0.90196079015731812), (0.53361344337463379, 0.88627451658248901,
0.88627451658248901), (0.5378151535987854, 0.86666667461395264,
0.86666667461395264), (0.54201680421829224, 0.85098040103912354,
0.85098040103912354), (0.54621851444244385, 0.81960785388946533,
0.81960785388946533), (0.55042016506195068, 0.80000001192092896,
0.80000001192092896), (0.55462187528610229, 0.78431373834609985,
0.78431373834609985), (0.55882352590560913, 0.76862746477127075,
0.76862746477127075), (0.56302523612976074, 0.75294119119644165,
0.75294119119644165), (0.56722688674926758, 0.73333334922790527,
0.73333334922790527), (0.57142859697341919, 0.71764707565307617,
0.71764707565307617), (0.57563024759292603, 0.70196080207824707,
0.70196080207824707), (0.57983195781707764, 0.68627452850341797,
0.68627452850341797), (0.58403360843658447, 0.66666668653488159,
0.66666668653488159), (0.58823531866073608, 0.65098041296005249,
0.65098041296005249), (0.59243696928024292, 0.63529413938522339,
0.63529413938522339), (0.59663867950439453, 0.61960786581039429,
0.61960786581039429), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.58431375026702881,
0.58431375026702881), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.55294120311737061,
0.55294120311737061), (0.61764705181121826, 0.53333336114883423,
0.53333336114883423), (0.62184876203536987, 0.51764708757400513,
0.51764708757400513), (0.62605041265487671, 0.50196081399917603,
0.50196081399917603), (0.63025212287902832, 0.46666666865348816,
0.46666666865348816), (0.63445377349853516, 0.45098039507865906,
0.45098039507865906), (0.63865548372268677, 0.43529412150382996,
0.43529412150382996), (0.6428571343421936, 0.41960784792900085,
0.41960784792900085), (0.64705884456634521, 0.40000000596046448,
0.40000000596046448), (0.65126049518585205, 0.38431373238563538,
0.38431373238563538), (0.65546220541000366, 0.36862745881080627,
0.36862745881080627), (0.6596638560295105, 0.35294118523597717,
0.35294118523597717), (0.66386556625366211, 0.3333333432674408,
0.3333333432674408), (0.66806721687316895, 0.31764706969261169,
0.31764706969261169), (0.67226892709732056, 0.30196079611778259,
0.30196079611778259), (0.67647057771682739, 0.28627452254295349,
0.28627452254295349), (0.680672287940979, 0.26666668057441711,
0.26666668057441711), (0.68487393856048584, 0.25098040699958801,
0.25098040699958801), (0.68907564878463745, 0.23529411852359772,
0.23529411852359772), (0.69327729940414429, 0.21960784494876862,
0.21960784494876862), (0.6974790096282959, 0.20000000298023224,
0.20000000298023224), (0.70168066024780273, 0.18431372940540314,
0.18431372940540314), (0.70588237047195435, 0.16862745583057404,
0.16862745583057404), (0.71008402109146118, 0.15294118225574493,
0.15294118225574493), (0.71428573131561279, 0.11764705926179886,
0.11764705926179886), (0.71848738193511963, 0.10196078568696976,
0.10196078568696976), (0.72268909215927124, 0.086274512112140656,
0.086274512112140656), (0.72689074277877808, 0.066666670143604279,
0.066666670143604279), (0.73109245300292969, 0.050980392843484879,
0.050980392843484879), (0.73529410362243652, 0.035294119268655777,
0.035294119268655777), (0.73949581384658813, 0.019607843831181526,
0.019607843831181526), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.011764706112444401, 0.011764706112444401),
(0.75210082530975342, 0.027450980618596077, 0.027450980618596077),
(0.75630253553390503, 0.058823529630899429, 0.058823529630899429),
(0.76050418615341187, 0.074509806931018829, 0.074509806931018829),
(0.76470589637756348, 0.086274512112140656, 0.086274512112140656),
(0.76890754699707031, 0.10196078568696976, 0.10196078568696976),
(0.77310925722122192, 0.11764705926179886, 0.11764705926179886),
(0.77731090784072876, 0.13333334028720856, 0.13333334028720856),
(0.78151261806488037, 0.14901961386203766, 0.14901961386203766),
(0.78571426868438721, 0.16078431904315948, 0.16078431904315948),
(0.78991597890853882, 0.17647059261798859, 0.17647059261798859),
(0.79411762952804565, 0.19215686619281769, 0.19215686619281769),
(0.79831933975219727, 0.22352941334247589, 0.22352941334247589),
(0.8025209903717041, 0.23529411852359772, 0.23529411852359772),
(0.80672270059585571, 0.25098040699958801, 0.25098040699958801),
(0.81092435121536255, 0.26666668057441711, 0.26666668057441711),
(0.81512606143951416, 0.28235295414924622, 0.28235295414924622),
(0.819327712059021, 0.29803922772407532, 0.29803922772407532),
(0.82352942228317261, 0.30980393290519714, 0.30980393290519714),
(0.82773107290267944, 0.32549020648002625, 0.32549020648002625),
(0.83193278312683105, 0.34117648005485535, 0.34117648005485535),
(0.83613443374633789, 0.35686275362968445, 0.35686275362968445),
(0.8403361439704895, 0.37254902720451355, 0.37254902720451355),
(0.84453779458999634, 0.38431373238563538, 0.38431373238563538),
(0.84873950481414795, 0.40000000596046448, 0.40000000596046448),
(0.85294115543365479, 0.41568627953529358, 0.41568627953529358),
(0.8571428656578064, 0.43137255311012268, 0.43137255311012268),
(0.86134451627731323, 0.44705882668495178, 0.44705882668495178),
(0.86554622650146484, 0.45882353186607361, 0.45882353186607361),
(0.86974787712097168, 0.47450980544090271, 0.47450980544090271),
(0.87394958734512329, 0.49019607901573181, 0.49019607901573181),
(0.87815123796463013, 0.5058823823928833, 0.5058823823928833),
(0.88235294818878174, 0.5372549295425415, 0.5372549295425415),
(0.88655459880828857, 0.54901963472366333, 0.54901963472366333),
(0.89075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.89495795965194702, 0.58039218187332153, 0.58039218187332153),
(0.89915966987609863, 0.59607845544815063, 0.59607845544815063),
(0.90336132049560547, 0.61176472902297974, 0.61176472902297974),
(0.90756303071975708, 0.62352943420410156, 0.62352943420410156),
(0.91176468133926392, 0.63921570777893066, 0.63921570777893066),
(0.91596639156341553, 0.65490198135375977, 0.65490198135375977),
(0.92016804218292236, 0.67058825492858887, 0.67058825492858887),
(0.92436975240707397, 0.68627452850341797, 0.68627452850341797),
(0.92857140302658081, 0.69803923368453979, 0.69803923368453979),
(0.93277311325073242, 0.7137255072593689, 0.7137255072593689),
(0.93697476387023926, 0.729411780834198, 0.729411780834198),
(0.94117647409439087, 0.7450980544090271, 0.7450980544090271),
(0.94537812471389771, 0.7607843279838562, 0.7607843279838562),
(0.94957983493804932, 0.77254903316497803, 0.77254903316497803),
(0.95378148555755615, 0.78823530673980713, 0.78823530673980713),
(0.95798319578170776, 0.80392158031463623, 0.80392158031463623),
(0.9621848464012146, 0.81960785388946533, 0.81960785388946533),
(0.96638655662536621, 0.84705883264541626, 0.84705883264541626),
(0.97058820724487305, 0.86274510622024536, 0.86274510622024536),
(0.97478991746902466, 0.87843137979507446, 0.87843137979507446),
(0.97899156808853149, 0.89411765336990356, 0.89411765336990356),
(0.98319327831268311, 0.90980392694473267, 0.90980392694473267),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.031372550874948502, 0.031372550874948502),
(0.037815127521753311, 0.035294119268655777, 0.035294119268655777),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.094117648899555206, 0.094117648899555206),
(0.10084033757448196, 0.098039217293262482, 0.098039217293262482),
(0.10504201799631119, 0.10196078568696976, 0.10196078568696976),
(0.10924369841814041, 0.10588235408067703, 0.10588235408067703),
(0.11344537883996964, 0.10980392247438431, 0.10980392247438431),
(0.11764705926179886, 0.11372549086809158, 0.11372549086809158),
(0.12184873968362808, 0.11764705926179886, 0.11764705926179886),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.15686275064945221, 0.15686275064945221),
(0.16386555135250092, 0.16078431904315948, 0.16078431904315948),
(0.16806723177433014, 0.16470588743686676, 0.16470588743686676),
(0.17226891219615936, 0.16862745583057404, 0.16862745583057404),
(0.17647059261798859, 0.17254902422428131, 0.17254902422428131),
(0.18067227303981781, 0.17647059261798859, 0.17647059261798859),
(0.18487395346164703, 0.18039216101169586, 0.18039216101169586),
(0.18907563388347626, 0.18431372940540314, 0.18431372940540314),
(0.19327731430530548, 0.18823529779911041, 0.18823529779911041),
(0.1974789947271347, 0.19215686619281769, 0.19215686619281769),
(0.20168067514896393, 0.19607843458652496, 0.19607843458652496),
(0.20588235557079315, 0.20000000298023224, 0.20000000298023224),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.21960784494876862, 0.21960784494876862),
(0.22689075767993927, 0.22352941334247589, 0.22352941334247589),
(0.23109243810176849, 0.22745098173618317, 0.22745098173618317),
(0.23529411852359772, 0.23137255012989044, 0.23137255012989044),
(0.23949579894542694, 0.23529411852359772, 0.23529411852359772),
(0.24369747936725616, 0.23921568691730499, 0.23921568691730499),
(0.24789915978908539, 0.24313725531101227, 0.24313725531101227),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28235295414924622, 0.28235295414924622),
(0.28991597890853882, 0.28627452254295349, 0.28627452254295349),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.34509804844856262, 0.34509804844856262),
(0.35294118523597717, 0.3490196168422699, 0.3490196168422699),
(0.3571428656578064, 0.35294118523597717, 0.35294118523597717),
(0.36134454607963562, 0.35686275362968445, 0.35686275362968445),
(0.36554622650146484, 0.36078432202339172, 0.36078432202339172),
(0.36974790692329407, 0.364705890417099, 0.364705890417099),
(0.37394958734512329, 0.36862745881080627, 0.36862745881080627),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.40784314274787903, 0.40784314274787903),
(0.41596639156341553, 0.4117647111415863, 0.4117647111415863),
(0.42016807198524475, 0.41568627953529358, 0.41568627953529358),
(0.42436975240707397, 0.41960784792900085, 0.41960784792900085),
(0.4285714328289032, 0.42352941632270813, 0.42352941632270813),
(0.43277311325073242, 0.42745098471641541, 0.42745098471641541),
(0.43697479367256165, 0.43137255311012268, 0.43137255311012268),
(0.44117647409439087, 0.43529412150382996, 0.43529412150382996),
(0.44537815451622009, 0.43921568989753723, 0.43921568989753723),
(0.44957983493804932, 0.44313725829124451, 0.44313725829124451),
(0.45378151535987854, 0.44705882668495178, 0.44705882668495178),
(0.45798319578170776, 0.45098039507865906, 0.45098039507865906),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47058823704719543, 0.47058823704719543),
(0.47899159789085388, 0.47450980544090271, 0.47450980544090271),
(0.48319327831268311, 0.47843137383460999, 0.47843137383460999),
(0.48739495873451233, 0.48235294222831726, 0.48235294222831726),
(0.49159663915634155, 0.48627451062202454, 0.48627451062202454),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.49411764740943909, 0.49411764740943909), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.53333336114883423, 0.53333336114883423), (0.54201680421829224,
0.5372549295425415, 0.5372549295425415), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.59607845544815063, 0.59607845544815063), (0.60504204034805298,
0.60000002384185791, 0.60000002384185791), (0.60924369096755981,
0.60392159223556519, 0.60392159223556519), (0.61344540119171143,
0.60784316062927246, 0.60784316062927246), (0.61764705181121826,
0.61176472902297974, 0.61176472902297974), (0.62184876203536987,
0.61568629741668701, 0.61568629741668701), (0.62605041265487671,
0.61960786581039429, 0.61960786581039429), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.65882354974746704, 0.65882354974746704), (0.66806721687316895,
0.66274511814117432, 0.66274511814117432), (0.67226892709732056,
0.66666668653488159, 0.66666668653488159), (0.67647057771682739,
0.67058825492858887, 0.67058825492858887), (0.680672287940979,
0.67450982332229614, 0.67450982332229614), (0.68487393856048584,
0.67843139171600342, 0.67843139171600342), (0.68907564878463745,
0.68235296010971069, 0.68235296010971069), (0.69327729940414429,
0.68627452850341797, 0.68627452850341797), (0.6974790096282959,
0.69019609689712524, 0.69019609689712524), (0.70168066024780273,
0.69411766529083252, 0.69411766529083252), (0.70588237047195435,
0.69803923368453979, 0.69803923368453979), (0.71008402109146118,
0.70196080207824707, 0.70196080207824707), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72156864404678345, 0.72156864404678345), (0.73109245300292969,
0.72549021244049072, 0.72549021244049072), (0.73529410362243652,
0.729411780834198, 0.729411780834198), (0.73949581384658813,
0.73333334922790527, 0.73333334922790527), (0.74369746446609497,
0.73725491762161255, 0.73725491762161255), (0.74789917469024658,
0.74117648601531982, 0.74117648601531982), (0.75210082530975342,
0.7450980544090271, 0.7450980544090271), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78431373834609985, 0.78431373834609985), (0.79411762952804565,
0.78823530673980713, 0.78823530673980713), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.84705883264541626, 0.84705883264541626), (0.8571428656578064,
0.85098040103912354, 0.85098040103912354), (0.86134451627731323,
0.85490196943283081, 0.85490196943283081), (0.86554622650146484,
0.85882353782653809, 0.85882353782653809), (0.86974787712097168,
0.86274510622024536, 0.86274510622024536), (0.87394958734512329,
0.86666667461395264, 0.86666667461395264), (0.87815123796463013,
0.87058824300765991, 0.87058824300765991), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.90980392694473267, 0.90980392694473267), (0.92016804218292236,
0.91372549533843994, 0.91372549533843994), (0.92436975240707397,
0.91764706373214722, 0.91764706373214722), (0.92857140302658081,
0.92156863212585449, 0.92156863212585449), (0.93277311325073242,
0.92549020051956177, 0.92549020051956177), (0.93697476387023926,
0.92941176891326904, 0.92941176891326904), (0.94117647409439087,
0.93333333730697632, 0.93333333730697632), (0.94537812471389771,
0.93725490570068359, 0.93725490570068359), (0.94957983493804932,
0.94117647409439087, 0.94117647409439087), (0.95378148555755615,
0.94509804248809814, 0.94509804248809814), (0.95798319578170776,
0.94901961088180542, 0.94901961088180542), (0.9621848464012146,
0.9529411792755127, 0.9529411792755127), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97254902124404907, 0.97254902124404907), (0.98319327831268311,
0.97647058963775635, 0.97647058963775635), (0.98739492893218994,
0.98039215803146362, 0.98039215803146362), (0.99159663915634155,
0.9843137264251709, 0.9843137264251709), (0.99579828977584839,
0.98823529481887817, 0.98823529481887817), (1.0, 0.99215686321258545,
0.99215686321258545)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.070588238537311554, 0.070588238537311554), (0.0084033617749810219,
0.14117647707462311, 0.14117647707462311), (0.012605042196810246,
0.21176470816135406, 0.21176470816135406), (0.016806723549962044,
0.28235295414924622, 0.28235295414924622), (0.021008403971791267,
0.35294118523597717, 0.35294118523597717), (0.025210084393620491,
0.42352941632270813, 0.42352941632270813), (0.029411764815449715,
0.49803921580314636, 0.49803921580314636), (0.033613447099924088,
0.56862747669219971, 0.56862747669219971), (0.037815127521753311,
0.63921570777893066, 0.63921570777893066), (0.042016807943582535,
0.78039216995239258, 0.78039216995239258), (0.046218488365411758,
0.85098040103912354, 0.85098040103912354), (0.050420168787240982,
0.92156863212585449, 0.92156863212585449), (0.054621849209070206,
0.99607843160629272, 0.99607843160629272), (0.058823529630899429,
0.97647058963775635, 0.97647058963775635), (0.063025213778018951,
0.95686274766921997, 0.95686274766921997), (0.067226894199848175,
0.93725490570068359, 0.93725490570068359), (0.071428574621677399,
0.91764706373214722, 0.91764706373214722), (0.075630255043506622,
0.89803922176361084, 0.89803922176361084), (0.079831935465335846,
0.87450981140136719, 0.87450981140136719), (0.08403361588716507,
0.85490196943283081, 0.85490196943283081), (0.088235296308994293,
0.83529412746429443, 0.83529412746429443), (0.092436976730823517,
0.81568628549575806, 0.81568628549575806), (0.09663865715265274,
0.79607844352722168, 0.79607844352722168), (0.10084033757448196,
0.77254903316497803, 0.77254903316497803), (0.10504201799631119,
0.75294119119644165, 0.75294119119644165), (0.10924369841814041,
0.73333334922790527, 0.73333334922790527), (0.11344537883996964,
0.7137255072593689, 0.7137255072593689), (0.11764705926179886,
0.69411766529083252, 0.69411766529083252), (0.12184873968362808,
0.67450982332229614, 0.67450982332229614), (0.1260504275560379,
0.63137257099151611, 0.63137257099151611), (0.13025210797786713,
0.61176472902297974, 0.61176472902297974), (0.13445378839969635,
0.59215688705444336, 0.59215688705444336), (0.13865546882152557,
0.57254904508590698, 0.57254904508590698), (0.1428571492433548,
0.54901963472366333, 0.54901963472366333), (0.14705882966518402,
0.52941179275512695, 0.52941179275512695), (0.15126051008701324,
0.50980395078659058, 0.50980395078659058), (0.15546219050884247,
0.49019607901573181, 0.49019607901573181), (0.15966387093067169,
0.47058823704719543, 0.47058823704719543), (0.16386555135250092,
0.45098039507865906, 0.45098039507865906), (0.16806723177433014,
0.42745098471641541, 0.42745098471641541), (0.17226891219615936,
0.40784314274787903, 0.40784314274787903), (0.17647059261798859,
0.38823530077934265, 0.38823530077934265), (0.18067227303981781,
0.36862745881080627, 0.36862745881080627), (0.18487395346164703,
0.3490196168422699, 0.3490196168422699), (0.18907563388347626,
0.32549020648002625, 0.32549020648002625), (0.19327731430530548,
0.30588236451148987, 0.30588236451148987), (0.1974789947271347,
0.28627452254295349, 0.28627452254295349), (0.20168067514896393,
0.26666668057441711, 0.26666668057441711), (0.20588235557079315,
0.24705882370471954, 0.24705882370471954), (0.21008403599262238,
0.20392157137393951, 0.20392157137393951), (0.2142857164144516,
0.18431372940540314, 0.18431372940540314), (0.21848739683628082,
0.16470588743686676, 0.16470588743686676), (0.22268907725811005,
0.14509804546833038, 0.14509804546833038), (0.22689075767993927,
0.12549020349979401, 0.12549020349979401), (0.23109243810176849,
0.10196078568696976, 0.10196078568696976), (0.23529411852359772,
0.08235294371843338, 0.08235294371843338), (0.23949579894542694,
0.062745101749897003, 0.062745101749897003), (0.24369747936725616,
0.043137256056070328, 0.043137256056070328), (0.24789915978908539,
0.023529412224888802, 0.023529412224888802), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28235295414924622, 0.28235295414924622), (0.28991597890853882,
0.28627452254295349, 0.28627452254295349), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.34509804844856262, 0.34509804844856262), (0.35294118523597717,
0.3490196168422699, 0.3490196168422699), (0.3571428656578064,
0.35294118523597717, 0.35294118523597717), (0.36134454607963562,
0.35686275362968445, 0.35686275362968445), (0.36554622650146484,
0.36078432202339172, 0.36078432202339172), (0.36974790692329407,
0.364705890417099, 0.364705890417099), (0.37394958734512329,
0.36862745881080627, 0.36862745881080627), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.40784314274787903, 0.40784314274787903), (0.41596639156341553,
0.4117647111415863, 0.4117647111415863), (0.42016807198524475,
0.41568627953529358, 0.41568627953529358), (0.42436975240707397,
0.41960784792900085, 0.41960784792900085), (0.4285714328289032,
0.42352941632270813, 0.42352941632270813), (0.43277311325073242,
0.42745098471641541, 0.42745098471641541), (0.43697479367256165,
0.43137255311012268, 0.43137255311012268), (0.44117647409439087,
0.43529412150382996, 0.43529412150382996), (0.44537815451622009,
0.43921568989753723, 0.43921568989753723), (0.44957983493804932,
0.44313725829124451, 0.44313725829124451), (0.45378151535987854,
0.44705882668495178, 0.44705882668495178), (0.45798319578170776,
0.45098039507865906, 0.45098039507865906), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47058823704719543, 0.47058823704719543), (0.47899159789085388,
0.47450980544090271, 0.47450980544090271), (0.48319327831268311,
0.47843137383460999, 0.47843137383460999), (0.48739495873451233,
0.48235294222831726, 0.48235294222831726), (0.49159663915634155,
0.48627451062202454, 0.48627451062202454), (0.49579831957817078,
0.49019607901573181, 0.49019607901573181), (0.5, 0.49411764740943909,
0.49411764740943909), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.53333336114883423,
0.53333336114883423), (0.54201680421829224, 0.5372549295425415,
0.5372549295425415), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.59607845544815063,
0.59607845544815063), (0.60504204034805298, 0.60000002384185791,
0.60000002384185791), (0.60924369096755981, 0.60392159223556519,
0.60392159223556519), (0.61344540119171143, 0.60784316062927246,
0.60784316062927246), (0.61764705181121826, 0.61176472902297974,
0.61176472902297974), (0.62184876203536987, 0.61568629741668701,
0.61568629741668701), (0.62605041265487671, 0.61960786581039429,
0.61960786581039429), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66274511814117432,
0.66274511814117432), (0.67226892709732056, 0.66666668653488159,
0.66666668653488159), (0.67647057771682739, 0.67058825492858887,
0.67058825492858887), (0.680672287940979, 0.67450982332229614,
0.67450982332229614), (0.68487393856048584, 0.67843139171600342,
0.67843139171600342), (0.68907564878463745, 0.68235296010971069,
0.68235296010971069), (0.69327729940414429, 0.68627452850341797,
0.68627452850341797), (0.6974790096282959, 0.69019609689712524,
0.69019609689712524), (0.70168066024780273, 0.69411766529083252,
0.69411766529083252), (0.70588237047195435, 0.69803923368453979,
0.69803923368453979), (0.71008402109146118, 0.70196080207824707,
0.70196080207824707), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72156864404678345,
0.72156864404678345), (0.73109245300292969, 0.72549021244049072,
0.72549021244049072), (0.73529410362243652, 0.729411780834198,
0.729411780834198), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.74117648601531982,
0.74117648601531982), (0.75210082530975342, 0.7450980544090271,
0.7450980544090271), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78431373834609985,
0.78431373834609985), (0.79411762952804565, 0.78823530673980713,
0.78823530673980713), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.84705883264541626,
0.84705883264541626), (0.8571428656578064, 0.85098040103912354,
0.85098040103912354), (0.86134451627731323, 0.85490196943283081,
0.85490196943283081), (0.86554622650146484, 0.85882353782653809,
0.85882353782653809), (0.86974787712097168, 0.86274510622024536,
0.86274510622024536), (0.87394958734512329, 0.86666667461395264,
0.86666667461395264), (0.87815123796463013, 0.87058824300765991,
0.87058824300765991), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.90980392694473267,
0.90980392694473267), (0.92016804218292236, 0.91372549533843994,
0.91372549533843994), (0.92436975240707397, 0.91764706373214722,
0.91764706373214722), (0.92857140302658081, 0.92156863212585449,
0.92156863212585449), (0.93277311325073242, 0.92549020051956177,
0.92549020051956177), (0.93697476387023926, 0.92941176891326904,
0.92941176891326904), (0.94117647409439087, 0.93333333730697632,
0.93333333730697632), (0.94537812471389771, 0.93725490570068359,
0.93725490570068359), (0.94957983493804932, 0.94117647409439087,
0.94117647409439087), (0.95378148555755615, 0.94509804248809814,
0.94509804248809814), (0.95798319578170776, 0.94901961088180542,
0.94901961088180542), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97254902124404907,
0.97254902124404907), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
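# --- Illustrative sketch (not part of the original data module) -------------
# Each *_data dict above follows matplotlib's "segmentdata" layout: for every
# channel ('red', 'green', 'blue') there is a list of (x, y0, y1) anchor
# points, with x increasing monotonically from 0.0 to 1.0. Assuming matplotlib
# is available, a dict in this format could be turned into a usable colormap
# as sketched below; the helper name `_make_segmented_cmap` is illustrative
# only and does not appear elsewhere in this module.
def _make_segmented_cmap(name, segmentdata, n=256):
    """Build a LinearSegmentedColormap from a segmentdata dict (sketch)."""
    from matplotlib.colors import LinearSegmentedColormap
    return LinearSegmentedColormap(name, segmentdata, N=n)
# Example usage (sketch): cmap = _make_segmented_cmap('gist_yarg', _gist_yarg_data)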
_gist_yarg_data = {'blue': [(0.0, 1.0, 1.0), (0.0042016808874905109,
0.99607843160629272, 0.99607843160629272), (0.0084033617749810219,
0.99215686321258545, 0.99215686321258545), (0.012605042196810246,
0.98823529481887817, 0.98823529481887817), (0.016806723549962044,
0.9843137264251709, 0.9843137264251709), (0.021008403971791267,
0.98039215803146362, 0.98039215803146362), (0.025210084393620491,
0.97647058963775635, 0.97647058963775635), (0.029411764815449715,
0.97254902124404907, 0.97254902124404907), (0.033613447099924088,
0.96470588445663452, 0.96470588445663452), (0.037815127521753311,
0.96078431606292725, 0.96078431606292725), (0.042016807943582535,
0.95686274766921997, 0.95686274766921997), (0.046218488365411758,
0.9529411792755127, 0.9529411792755127), (0.050420168787240982,
0.94901961088180542, 0.94901961088180542), (0.054621849209070206,
0.94509804248809814, 0.94509804248809814), (0.058823529630899429,
0.94117647409439087, 0.94117647409439087), (0.063025213778018951,
0.93725490570068359, 0.93725490570068359), (0.067226894199848175,
0.93333333730697632, 0.93333333730697632), (0.071428574621677399,
0.92941176891326904, 0.92941176891326904), (0.075630255043506622,
0.92549020051956177, 0.92549020051956177), (0.079831935465335846,
0.92156863212585449, 0.92156863212585449), (0.08403361588716507,
0.91764706373214722, 0.91764706373214722), (0.088235296308994293,
0.91372549533843994, 0.91372549533843994), (0.092436976730823517,
0.90980392694473267, 0.90980392694473267), (0.09663865715265274,
0.90196079015731812, 0.90196079015731812), (0.10084033757448196,
0.89803922176361084, 0.89803922176361084), (0.10504201799631119,
0.89411765336990356, 0.89411765336990356), (0.10924369841814041,
0.89019608497619629, 0.89019608497619629), (0.11344537883996964,
0.88627451658248901, 0.88627451658248901), (0.11764705926179886,
0.88235294818878174, 0.88235294818878174), (0.12184873968362808,
0.87843137979507446, 0.87843137979507446), (0.1260504275560379,
0.87450981140136719, 0.87450981140136719), (0.13025210797786713,
0.87058824300765991, 0.87058824300765991), (0.13445378839969635,
0.86666667461395264, 0.86666667461395264), (0.13865546882152557,
0.86274510622024536, 0.86274510622024536), (0.1428571492433548,
0.85882353782653809, 0.85882353782653809), (0.14705882966518402,
0.85490196943283081, 0.85490196943283081), (0.15126051008701324,
0.85098040103912354, 0.85098040103912354), (0.15546219050884247,
0.84705883264541626, 0.84705883264541626), (0.15966387093067169,
0.83921569585800171, 0.83921569585800171), (0.16386555135250092,
0.83529412746429443, 0.83529412746429443), (0.16806723177433014,
0.83137255907058716, 0.83137255907058716), (0.17226891219615936,
0.82745099067687988, 0.82745099067687988), (0.17647059261798859,
0.82352942228317261, 0.82352942228317261), (0.18067227303981781,
0.81960785388946533, 0.81960785388946533), (0.18487395346164703,
0.81568628549575806, 0.81568628549575806), (0.18907563388347626,
0.81176471710205078, 0.81176471710205078), (0.19327731430530548,
0.80784314870834351, 0.80784314870834351), (0.1974789947271347,
0.80392158031463623, 0.80392158031463623), (0.20168067514896393,
0.80000001192092896, 0.80000001192092896), (0.20588235557079315,
0.79607844352722168, 0.79607844352722168), (0.21008403599262238,
0.7921568751335144, 0.7921568751335144), (0.2142857164144516,
0.78823530673980713, 0.78823530673980713), (0.21848739683628082,
0.78431373834609985, 0.78431373834609985), (0.22268907725811005,
0.7764706015586853, 0.7764706015586853), (0.22689075767993927,
0.77254903316497803, 0.77254903316497803), (0.23109243810176849,
0.76862746477127075, 0.76862746477127075), (0.23529411852359772,
0.76470589637756348, 0.76470589637756348), (0.23949579894542694,
0.7607843279838562, 0.7607843279838562), (0.24369747936725616,
0.75686275959014893, 0.75686275959014893), (0.24789915978908539,
0.75294119119644165, 0.75294119119644165), (0.25210085511207581,
0.74901962280273438, 0.74901962280273438), (0.25630253553390503,
0.7450980544090271, 0.7450980544090271), (0.26050421595573425,
0.74117648601531982, 0.74117648601531982), (0.26470589637756348,
0.73725491762161255, 0.73725491762161255), (0.2689075767993927,
0.73333334922790527, 0.73333334922790527), (0.27310925722122192,
0.729411780834198, 0.729411780834198), (0.27731093764305115,
0.72549021244049072, 0.72549021244049072), (0.28151261806488037,
0.72156864404678345, 0.72156864404678345), (0.28571429848670959,
0.7137255072593689, 0.7137255072593689), (0.28991597890853882,
0.70980393886566162, 0.70980393886566162), (0.29411765933036804,
0.70588237047195435, 0.70588237047195435), (0.29831933975219727,
0.70196080207824707, 0.70196080207824707), (0.30252102017402649,
0.69803923368453979, 0.69803923368453979), (0.30672270059585571,
0.69411766529083252, 0.69411766529083252), (0.31092438101768494,
0.69019609689712524, 0.69019609689712524), (0.31512606143951416,
0.68627452850341797, 0.68627452850341797), (0.31932774186134338,
0.68235296010971069, 0.68235296010971069), (0.32352942228317261,
0.67843139171600342, 0.67843139171600342), (0.32773110270500183,
0.67450982332229614, 0.67450982332229614), (0.33193278312683105,
0.67058825492858887, 0.67058825492858887), (0.33613446354866028,
0.66666668653488159, 0.66666668653488159), (0.3403361439704895,
0.66274511814117432, 0.66274511814117432), (0.34453782439231873,
0.65882354974746704, 0.65882354974746704), (0.34873950481414795,
0.65098041296005249, 0.65098041296005249), (0.35294118523597717,
0.64705884456634521, 0.64705884456634521), (0.3571428656578064,
0.64313727617263794, 0.64313727617263794), (0.36134454607963562,
0.63921570777893066, 0.63921570777893066), (0.36554622650146484,
0.63529413938522339, 0.63529413938522339), (0.36974790692329407,
0.63137257099151611, 0.63137257099151611), (0.37394958734512329,
0.62745100259780884, 0.62745100259780884), (0.37815126776695251,
0.62352943420410156, 0.62352943420410156), (0.38235294818878174,
0.61960786581039429, 0.61960786581039429), (0.38655462861061096,
0.61568629741668701, 0.61568629741668701), (0.39075630903244019,
0.61176472902297974, 0.61176472902297974), (0.39495798945426941,
0.60784316062927246, 0.60784316062927246), (0.39915966987609863,
0.60392159223556519, 0.60392159223556519), (0.40336135029792786,
0.60000002384185791, 0.60000002384185791), (0.40756303071975708,
0.59607845544815063, 0.59607845544815063), (0.4117647111415863,
0.58823531866073608, 0.58823531866073608), (0.41596639156341553,
0.58431375026702881, 0.58431375026702881), (0.42016807198524475,
0.58039218187332153, 0.58039218187332153), (0.42436975240707397,
0.57647061347961426, 0.57647061347961426), (0.4285714328289032,
0.57254904508590698, 0.57254904508590698), (0.43277311325073242,
0.56862747669219971, 0.56862747669219971), (0.43697479367256165,
0.56470590829849243, 0.56470590829849243), (0.44117647409439087,
0.56078433990478516, 0.56078433990478516), (0.44537815451622009,
0.55686277151107788, 0.55686277151107788), (0.44957983493804932,
0.55294120311737061, 0.55294120311737061), (0.45378151535987854,
0.54901963472366333, 0.54901963472366333), (0.45798319578170776,
0.54509806632995605, 0.54509806632995605), (0.46218487620353699,
0.54117649793624878, 0.54117649793624878), (0.46638655662536621,
0.5372549295425415, 0.5372549295425415), (0.47058823704719543,
0.53333336114883423, 0.53333336114883423), (0.47478991746902466,
0.52549022436141968, 0.52549022436141968), (0.47899159789085388,
0.5215686559677124, 0.5215686559677124), (0.48319327831268311,
0.51764708757400513, 0.51764708757400513), (0.48739495873451233,
0.51372551918029785, 0.51372551918029785), (0.49159663915634155,
0.50980395078659058, 0.50980395078659058), (0.49579831957817078,
0.5058823823928833, 0.5058823823928833), (0.5, 0.50196081399917603,
0.50196081399917603), (0.50420171022415161, 0.49803921580314636,
0.49803921580314636), (0.50840336084365845, 0.49411764740943909,
0.49411764740943909), (0.51260507106781006, 0.49019607901573181,
0.49019607901573181), (0.51680672168731689, 0.48627451062202454,
0.48627451062202454), (0.52100843191146851, 0.48235294222831726,
0.48235294222831726), (0.52521008253097534, 0.47843137383460999,
0.47843137383460999), (0.52941179275512695, 0.47450980544090271,
0.47450980544090271), (0.53361344337463379, 0.47058823704719543,
0.47058823704719543), (0.5378151535987854, 0.46274510025978088,
0.46274510025978088), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.45490196347236633,
0.45490196347236633), (0.55042016506195068, 0.45098039507865906,
0.45098039507865906), (0.55462187528610229, 0.44705882668495178,
0.44705882668495178), (0.55882352590560913, 0.44313725829124451,
0.44313725829124451), (0.56302523612976074, 0.43921568989753723,
0.43921568989753723), (0.56722688674926758, 0.43529412150382996,
0.43529412150382996), (0.57142859697341919, 0.43137255311012268,
0.43137255311012268), (0.57563024759292603, 0.42745098471641541,
0.42745098471641541), (0.57983195781707764, 0.42352941632270813,
0.42352941632270813), (0.58403360843658447, 0.41960784792900085,
0.41960784792900085), (0.58823531866073608, 0.41568627953529358,
0.41568627953529358), (0.59243696928024292, 0.4117647111415863,
0.4117647111415863), (0.59663867950439453, 0.40784314274787903,
0.40784314274787903), (0.60084033012390137, 0.40000000596046448,
0.40000000596046448), (0.60504204034805298, 0.3960784375667572,
0.3960784375667572), (0.60924369096755981, 0.39215686917304993,
0.39215686917304993), (0.61344540119171143, 0.38823530077934265,
0.38823530077934265), (0.61764705181121826, 0.38431373238563538,
0.38431373238563538), (0.62184876203536987, 0.3803921639919281,
0.3803921639919281), (0.62605041265487671, 0.37647059559822083,
0.37647059559822083), (0.63025212287902832, 0.37254902720451355,
0.37254902720451355), (0.63445377349853516, 0.36862745881080627,
0.36862745881080627), (0.63865548372268677, 0.364705890417099,
0.364705890417099), (0.6428571343421936, 0.36078432202339172,
0.36078432202339172), (0.64705884456634521, 0.35686275362968445,
0.35686275362968445), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.3490196168422699,
0.3490196168422699), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.33725491166114807,
0.33725491166114807), (0.66806721687316895, 0.3333333432674408,
0.3333333432674408), (0.67226892709732056, 0.32941177487373352,
0.32941177487373352), (0.67647057771682739, 0.32549020648002625,
0.32549020648002625), (0.680672287940979, 0.32156863808631897,
0.32156863808631897), (0.68487393856048584, 0.31764706969261169,
0.31764706969261169), (0.68907564878463745, 0.31372550129890442,
0.31372550129890442), (0.69327729940414429, 0.30980393290519714,
0.30980393290519714), (0.6974790096282959, 0.30588236451148987,
0.30588236451148987), (0.70168066024780273, 0.30196079611778259,
0.30196079611778259), (0.70588237047195435, 0.29803922772407532,
0.29803922772407532), (0.71008402109146118, 0.29411765933036804,
0.29411765933036804), (0.71428573131561279, 0.29019609093666077,
0.29019609093666077), (0.71848738193511963, 0.28627452254295349,
0.28627452254295349), (0.72268909215927124, 0.28235295414924622,
0.28235295414924622), (0.72689074277877808, 0.27450981736183167,
0.27450981736183167), (0.73109245300292969, 0.27058824896812439,
0.27058824896812439), (0.73529410362243652, 0.26666668057441711,
0.26666668057441711), (0.73949581384658813, 0.26274511218070984,
0.26274511218070984), (0.74369746446609497, 0.25882354378700256,
0.25882354378700256), (0.74789917469024658, 0.25490197539329529,
0.25490197539329529), (0.75210082530975342, 0.25098040699958801,
0.25098040699958801), (0.75630253553390503, 0.24705882370471954,
0.24705882370471954), (0.76050418615341187, 0.24313725531101227,
0.24313725531101227), (0.76470589637756348, 0.23921568691730499,
0.23921568691730499), (0.76890754699707031, 0.23529411852359772,
0.23529411852359772), (0.77310925722122192, 0.23137255012989044,
0.23137255012989044), (0.77731090784072876, 0.22745098173618317,
0.22745098173618317), (0.78151261806488037, 0.22352941334247589,
0.22352941334247589), (0.78571426868438721, 0.21960784494876862,
0.21960784494876862), (0.78991597890853882, 0.21176470816135406,
0.21176470816135406), (0.79411762952804565, 0.20784313976764679,
0.20784313976764679), (0.79831933975219727, 0.20392157137393951,
0.20392157137393951), (0.8025209903717041, 0.20000000298023224,
0.20000000298023224), (0.80672270059585571, 0.19607843458652496,
0.19607843458652496), (0.81092435121536255, 0.19215686619281769,
0.19215686619281769), (0.81512606143951416, 0.18823529779911041,
0.18823529779911041), (0.819327712059021, 0.18431372940540314,
0.18431372940540314), (0.82352942228317261, 0.18039216101169586,
0.18039216101169586), (0.82773107290267944, 0.17647059261798859,
0.17647059261798859), (0.83193278312683105, 0.17254902422428131,
0.17254902422428131), (0.83613443374633789, 0.16862745583057404,
0.16862745583057404), (0.8403361439704895, 0.16470588743686676,
0.16470588743686676), (0.84453779458999634, 0.16078431904315948,
0.16078431904315948), (0.84873950481414795, 0.15686275064945221,
0.15686275064945221), (0.85294115543365479, 0.14901961386203766,
0.14901961386203766), (0.8571428656578064, 0.14509804546833038,
0.14509804546833038), (0.86134451627731323, 0.14117647707462311,
0.14117647707462311), (0.86554622650146484, 0.13725490868091583,
0.13725490868091583), (0.86974787712097168, 0.13333334028720856,
0.13333334028720856), (0.87394958734512329, 0.12941177189350128,
0.12941177189350128), (0.87815123796463013, 0.12549020349979401,
0.12549020349979401), (0.88235294818878174, 0.12156862765550613,
0.12156862765550613), (0.88655459880828857, 0.11764705926179886,
0.11764705926179886), (0.89075630903244019, 0.11372549086809158,
0.11372549086809158), (0.89495795965194702, 0.10980392247438431,
0.10980392247438431), (0.89915966987609863, 0.10588235408067703,
0.10588235408067703), (0.90336132049560547, 0.10196078568696976,
0.10196078568696976), (0.90756303071975708, 0.098039217293262482,
0.098039217293262482), (0.91176468133926392, 0.094117648899555206,
0.094117648899555206), (0.91596639156341553, 0.086274512112140656,
0.086274512112140656), (0.92016804218292236, 0.08235294371843338,
0.08235294371843338), (0.92436975240707397, 0.078431375324726105,
0.078431375324726105), (0.92857140302658081, 0.074509806931018829,
0.074509806931018829), (0.93277311325073242, 0.070588238537311554,
0.070588238537311554), (0.93697476387023926, 0.066666670143604279,
0.066666670143604279), (0.94117647409439087, 0.062745101749897003,
0.062745101749897003), (0.94537812471389771, 0.058823529630899429,
0.058823529630899429), (0.94957983493804932, 0.054901961237192154,
0.054901961237192154), (0.95378148555755615, 0.050980392843484879,
0.050980392843484879), (0.95798319578170776, 0.047058824449777603,
0.047058824449777603), (0.9621848464012146, 0.043137256056070328,
0.043137256056070328), (0.96638655662536621, 0.039215687662363052,
0.039215687662363052), (0.97058820724487305, 0.035294119268655777,
0.035294119268655777), (0.97478991746902466, 0.031372550874948502,
0.031372550874948502), (0.97899156808853149, 0.023529412224888802,
0.023529412224888802), (0.98319327831268311, 0.019607843831181526,
0.019607843831181526), (0.98739492893218994, 0.015686275437474251,
0.015686275437474251), (0.99159663915634155, 0.011764706112444401,
0.011764706112444401), (0.99579828977584839, 0.0078431377187371254,
0.0078431377187371254), (1.0, 0.0039215688593685627,
0.0039215688593685627)], 'green': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)], 'red': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)]}
Accent = colors.LinearSegmentedColormap('Accent', _Accent_data, LUTSIZE)
Blues = colors.LinearSegmentedColormap('Blues', _Blues_data, LUTSIZE)
BrBG = colors.LinearSegmentedColormap('BrBG', _BrBG_data, LUTSIZE)
BuGn = colors.LinearSegmentedColormap('BuGn', _BuGn_data, LUTSIZE)
BuPu = colors.LinearSegmentedColormap('BuPu', _BuPu_data, LUTSIZE)
Dark2 = colors.LinearSegmentedColormap('Dark2', _Dark2_data, LUTSIZE)
GnBu = colors.LinearSegmentedColormap('GnBu', _GnBu_data, LUTSIZE)
Greens = colors.LinearSegmentedColormap('Greens', _Greens_data, LUTSIZE)
Greys = colors.LinearSegmentedColormap('Greys', _Greys_data, LUTSIZE)
Oranges = colors.LinearSegmentedColormap('Oranges', _Oranges_data, LUTSIZE)
OrRd = colors.LinearSegmentedColormap('OrRd', _OrRd_data, LUTSIZE)
Paired = colors.LinearSegmentedColormap('Paired', _Paired_data, LUTSIZE)
Pastel1 = colors.LinearSegmentedColormap('Pastel1', _Pastel1_data, LUTSIZE)
Pastel2 = colors.LinearSegmentedColormap('Pastel2', _Pastel2_data, LUTSIZE)
PiYG = colors.LinearSegmentedColormap('PiYG', _PiYG_data, LUTSIZE)
PRGn = colors.LinearSegmentedColormap('PRGn', _PRGn_data, LUTSIZE)
PuBu = colors.LinearSegmentedColormap('PuBu', _PuBu_data, LUTSIZE)
PuBuGn = colors.LinearSegmentedColormap('PuBuGn', _PuBuGn_data, LUTSIZE)
PuOr = colors.LinearSegmentedColormap('PuOr', _PuOr_data, LUTSIZE)
PuRd = colors.LinearSegmentedColormap('PuRd', _PuRd_data, LUTSIZE)
Purples = colors.LinearSegmentedColormap('Purples', _Purples_data, LUTSIZE)
RdBu = colors.LinearSegmentedColormap('RdBu', _RdBu_data, LUTSIZE)
RdGy = colors.LinearSegmentedColormap('RdGy', _RdGy_data, LUTSIZE)
RdPu = colors.LinearSegmentedColormap('RdPu', _RdPu_data, LUTSIZE)
RdYlBu = colors.LinearSegmentedColormap('RdYlBu', _RdYlBu_data, LUTSIZE)
RdYlGn = colors.LinearSegmentedColormap('RdYlGn', _RdYlGn_data, LUTSIZE)
Reds = colors.LinearSegmentedColormap('Reds', _Reds_data, LUTSIZE)
Set1 = colors.LinearSegmentedColormap('Set1', _Set1_data, LUTSIZE)
Set2 = colors.LinearSegmentedColormap('Set2', _Set2_data, LUTSIZE)
Set3 = colors.LinearSegmentedColormap('Set3', _Set3_data, LUTSIZE)
Spectral = colors.LinearSegmentedColormap('Spectral', _Spectral_data, LUTSIZE)
YlGn = colors.LinearSegmentedColormap('YlGn', _YlGn_data, LUTSIZE)
YlGnBu = colors.LinearSegmentedColormap('YlGnBu', _YlGnBu_data, LUTSIZE)
YlOrBr = colors.LinearSegmentedColormap('YlOrBr', _YlOrBr_data, LUTSIZE)
YlOrRd = colors.LinearSegmentedColormap('YlOrRd', _YlOrRd_data, LUTSIZE)
gist_earth = colors.LinearSegmentedColormap('gist_earth', _gist_earth_data, LUTSIZE)
gist_gray = colors.LinearSegmentedColormap('gist_gray', _gist_gray_data, LUTSIZE)
gist_heat = colors.LinearSegmentedColormap('gist_heat', _gist_heat_data, LUTSIZE)
gist_ncar = colors.LinearSegmentedColormap('gist_ncar', _gist_ncar_data, LUTSIZE)
gist_rainbow = colors.LinearSegmentedColormap('gist_rainbow', _gist_rainbow_data, LUTSIZE)
gist_stern = colors.LinearSegmentedColormap('gist_stern', _gist_stern_data, LUTSIZE)
gist_yarg = colors.LinearSegmentedColormap('gist_yarg', _gist_yarg_data, LUTSIZE)
datad['Accent']=_Accent_data
datad['Blues']=_Blues_data
datad['BrBG']=_BrBG_data
datad['BuGn']=_BuGn_data
datad['BuPu']=_BuPu_data
datad['Dark2']=_Dark2_data
datad['GnBu']=_GnBu_data
datad['Greens']=_Greens_data
datad['Greys']=_Greys_data
datad['Oranges']=_Oranges_data
datad['OrRd']=_OrRd_data
datad['Paired']=_Paired_data
datad['Pastel1']=_Pastel1_data
datad['Pastel2']=_Pastel2_data
datad['PiYG']=_PiYG_data
datad['PRGn']=_PRGn_data
datad['PuBu']=_PuBu_data
datad['PuBuGn']=_PuBuGn_data
datad['PuOr']=_PuOr_data
datad['PuRd']=_PuRd_data
datad['Purples']=_Purples_data
datad['RdBu']=_RdBu_data
datad['RdGy']=_RdGy_data
datad['RdPu']=_RdPu_data
datad['RdYlBu']=_RdYlBu_data
datad['RdYlGn']=_RdYlGn_data
datad['Reds']=_Reds_data
datad['Set1']=_Set1_data
datad['Set2']=_Set2_data
datad['Set3']=_Set3_data
datad['Spectral']=_Spectral_data
datad['YlGn']=_YlGn_data
datad['YlGnBu']=_YlGnBu_data
datad['YlOrBr']=_YlOrBr_data
datad['YlOrRd']=_YlOrRd_data
datad['gist_earth']=_gist_earth_data
datad['gist_gray']=_gist_gray_data
datad['gist_heat']=_gist_heat_data
datad['gist_ncar']=_gist_ncar_data
datad['gist_rainbow']=_gist_rainbow_data
datad['gist_stern']=_gist_stern_data
datad['gist_yarg']=_gist_yarg_data
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def revcmap(data):
data_r = {}
for key, val in data.iteritems():
valnew = [(1.-a, b, c) for a, b, c in reversed(val)]
data_r[key] = valnew
return data_r
cmapnames = datad.keys()
for cmapname in cmapnames:
cmapname_r = cmapname+'_r'
cmapdat_r = revcmap(datad[cmapname])
datad[cmapname_r] = cmapdat_r
locals()[cmapname_r] = colors.LinearSegmentedColormap(cmapname_r, cmapdat_r, LUTSIZE)
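# --- Hedged usage sketch (not part of the original module) -------------------
# The loop above registers a reversed twin for every colormap (e.g. Blues_r
# next to Blues). The guarded block below is a minimal, illustrative check of
# that behaviour; it assumes only numpy/matplotlib plus the objects defined in
# this file, and runs only when the module is executed directly.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    gradient = np.linspace(0, 1, 256).reshape(1, -1)
    fig, (ax_fwd, ax_rev) = plt.subplots(2, 1, figsize=(6, 2))
    ax_fwd.imshow(gradient, aspect='auto', cmap=Blues)    # forward map
    ax_rev.imshow(gradient, aspect='auto', cmap=Blues_r)  # reversed by revcmap()
    ax_fwd.set_axis_off()
    ax_rev.set_axis_off()
    plt.show()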
|
agpl-3.0
|
manashmndl/scikit-learn
|
examples/calibration/plot_calibration_curve.py
|
225
|
5903
|
"""
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence in the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of which are used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: its calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
|
bsd-3-clause
|
cgre-aachen/gempy
|
examples/tutorials/ch5_probabilistic_modeling_DEP/aux_functions/__init__.py
|
1
|
1490
|
import gempy as gp
import numpy as np
import matplotlib.pyplot as plt
def plot_geo_setting(geo_model):
device_loc = np.array([[6e3, 0, 3700]])
p2d = gp.plot_2d(geo_model, show_topography=True)
well_1 = 3.5e3
well_2 = 3.6e3
p2d.axes[0].scatter([3e3], [well_1], marker='^', s=400, c='#71a4b3', zorder=10)
p2d.axes[0].scatter([9e3], [well_2], marker='^', s=400, c='#71a4b3', zorder=10)
p2d.axes[0].scatter(device_loc[:, 0], device_loc[:, 2], marker='x', s=400, c='#DA8886', zorder=10)
p2d.axes[0].vlines(3e3, .5e3, well_1, linewidth=4, color='gray')
p2d.axes[0].vlines(9e3, .5e3, well_2, linewidth=4, color='gray')
p2d.axes[0].vlines(3e3, .5e3, well_1)
p2d.axes[0].vlines(9e3, .5e3, well_2)
plt.show()
def plot_geo_setting_well(geo_model):
device_loc = np.array([[6e3, 0, 3700]])
p2d = gp.plot_2d(geo_model, show_topography=True, legend=False)
well_1 = 3.41e3
well_2 = 3.6e3
p2d.axes[0].scatter([3e3], [well_1], marker='^', s=400, c='#71a4b3', zorder=10)
p2d.axes[0].scatter([9e3], [well_2], marker='^', s=400, c='#71a4b3', zorder=10)
p2d.axes[0].scatter(device_loc[:, 0], device_loc[:, 2], marker='x', s=400, c='#DA8886', zorder=10)
p2d.axes[0].vlines(3e3, .5e3, well_1, linewidth=4, color='gray')
p2d.axes[0].vlines(9e3, .5e3, well_2, linewidth=4, color='gray')
p2d.axes[0].vlines(3e3, .5e3, well_1)
p2d.axes[0].vlines(9e3, .5e3, well_2)
p2d.axes[0].set_xlim(2900, 3100)
plt.show()
|
lgpl-3.0
|
AlexRobson/scikit-learn
|
examples/mixture/plot_gmm_sin.py
|
248
|
2747
|
"""
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to find relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
jundongl/PyFeaST
|
skfeature/utility/construct_W.py
|
3
|
17385
|
import numpy as np
from scipy.sparse import *
from sklearn.metrics.pairwise import pairwise_distances
def construct_W(X, **kwargs):
"""
Construct the affinity matrix W through different ways
Notes
-----
if kwargs is empty, use the default parameter settings;
if kwargs is not empty, construct the affinity matrix according to the parameters in kwargs
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
kwargs: {dictionary}
parameters to construct different affinity matrix W:
y: {numpy array}, shape (n_samples, 1)
the true label information needed under the 'supervised' neighbor mode
metric: {string}
choices for different distance measures
'euclidean' - use euclidean distance
'cosine' - use cosine distance (default)
neighbor_mode: {string}
indicates how to construct the graph
'knn' - put an edge between two nodes if and only if they are among the
k nearest neighbors of each other (default)
'supervised' - put an edge between two nodes if they belong to same class
and they are among the k nearest neighbors of each other
weight_mode: {string}
indicates how to assign weights for each edge in the graph
'binary' - 0-1 weighting, every edge receives weight of 1 (default)
'heat_kernel' - if nodes i and j are connected, put weight W_ij = exp(-||x_i - x_j||^2/(2*t^2))
this weight mode can only be used under 'euclidean' metric and you are required
to provide the parameter t
'cosine' - if nodes i and j are connected, put weight cosine(x_i,x_j).
this weight mode can only be used under 'cosine' metric
k: {int}
choices for the number of neighbors (default k = 5)
t: {float}
parameter for the 'heat_kernel' weight_mode
fisher_score: {boolean}
indicates whether to build the affinity matrix in a fisher score way, in which W_ij = 1/n_l if yi = yj = l;
otherwise W_ij = 0 (default fisher_score = false)
reliefF: {boolean}
indicates whether to build the affinity matrix in a reliefF way; NH(x) and NM(x, y) denote the sets of
k nearest points to x with the same class as x, and with a different class (the class y), respectively.
W_ij = 1 if i = j; W_ij = 1/k if x_j \in NH(x_i); W_ij = -1/((c-1)*k) if x_j \in NM(x_i, y) (default reliefF = false)
Output
------
W: {sparse matrix}, shape (n_samples, n_samples)
output affinity matrix W
"""
# default metric is 'cosine'
if 'metric' not in kwargs.keys():
kwargs['metric'] = 'cosine'
# default neighbor mode is 'knn' and default neighbor size is 5
if 'neighbor_mode' not in kwargs.keys():
kwargs['neighbor_mode'] = 'knn'
if kwargs['neighbor_mode'] == 'knn' and 'k' not in kwargs.keys():
kwargs['k'] = 5
if kwargs['neighbor_mode'] == 'supervised' and 'k' not in kwargs.keys():
kwargs['k'] = 5
if kwargs['neighbor_mode'] == 'supervised' and 'y' not in kwargs.keys():
print ('Warning: label is required in the supervised neighborMode!!!')
exit(0)
# default weight mode is 'binary', default t in heat kernel mode is 1
if 'weight_mode' not in kwargs.keys():
kwargs['weight_mode'] = 'binary'
if kwargs['weight_mode'] == 'heat_kernel':
if kwargs['metric'] != 'euclidean':
kwargs['metric'] = 'euclidean'
if 't' not in kwargs.keys():
kwargs['t'] = 1
elif kwargs['weight_mode'] == 'cosine':
if kwargs['metric'] != 'cosine':
kwargs['metric'] = 'cosine'
# default fisher_score and reliefF mode are 'false'
if 'fisher_score' not in kwargs.keys():
kwargs['fisher_score'] = False
if 'reliefF' not in kwargs.keys():
kwargs['reliefF'] = False
n_samples, n_features = np.shape(X)
# choose 'knn' neighbor mode
if kwargs['neighbor_mode'] == 'knn':
k = kwargs['k']
if kwargs['weight_mode'] == 'binary':
if kwargs['metric'] == 'euclidean':
# compute pairwise euclidean distances
D = pairwise_distances(X)
D **= 2
# sort the distance matrix D in ascending order
dump = np.sort(D, axis=1)
idx = np.argsort(D, axis=1)
# choose the k-nearest neighbors for each instance
idx_new = idx[:, 0:k+1]
G = np.zeros((n_samples*(k+1), 3))
G[:, 0] = np.tile(np.arange(n_samples), (k+1, 1)).reshape(-1)
G[:, 1] = np.ravel(idx_new, order='F')
G[:, 2] = 1
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
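                # the next two lines symmetrize W: for every pair (i, j) the
                # larger of W_ij and W_ji is kept (an elementwise max of W and W.T)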
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
elif kwargs['metric'] == 'cosine':
# normalize the data first
X_normalized = np.power(np.sum(X*X, axis=1), 0.5)
for i in range(n_samples):
X[i, :] = X[i, :]/max(1e-12, X_normalized[i])
# compute pairwise cosine distances
D_cosine = np.dot(X, np.transpose(X))
# sort the distance matrix D in descending order
dump = np.sort(-D_cosine, axis=1)
idx = np.argsort(-D_cosine, axis=1)
idx_new = idx[:, 0:k+1]
G = np.zeros((n_samples*(k+1), 3))
G[:, 0] = np.tile(np.arange(n_samples), (k+1, 1)).reshape(-1)
G[:, 1] = np.ravel(idx_new, order='F')
G[:, 2] = 1
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
elif kwargs['weight_mode'] == 'heat_kernel':
t = kwargs['t']
# compute pairwise euclidean distances
D = pairwise_distances(X)
D **= 2
# sort the distance matrix D in ascending order
dump = np.sort(D, axis=1)
idx = np.argsort(D, axis=1)
idx_new = idx[:, 0:k+1]
dump_new = dump[:, 0:k+1]
# compute the pairwise heat kernel distances
dump_heat_kernel = np.exp(-dump_new/(2*t*t))
G = np.zeros((n_samples*(k+1), 3))
G[:, 0] = np.tile(np.arange(n_samples), (k+1, 1)).reshape(-1)
G[:, 1] = np.ravel(idx_new, order='F')
G[:, 2] = np.ravel(dump_heat_kernel, order='F')
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
elif kwargs['weight_mode'] == 'cosine':
# normalize the data first
X_normalized = np.power(np.sum(X*X, axis=1), 0.5)
for i in range(n_samples):
X[i, :] = X[i, :]/max(1e-12, X_normalized[i])
# compute pairwise cosine distances
D_cosine = np.dot(X, np.transpose(X))
# sort the distance matrix D in ascending order
dump = np.sort(-D_cosine, axis=1)
idx = np.argsort(-D_cosine, axis=1)
idx_new = idx[:, 0:k+1]
dump_new = -dump[:, 0:k+1]
G = np.zeros((n_samples*(k+1), 3))
G[:, 0] = np.tile(np.arange(n_samples), (k+1, 1)).reshape(-1)
G[:, 1] = np.ravel(idx_new, order='F')
G[:, 2] = np.ravel(dump_new, order='F')
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
# choose supervised neighborMode
elif kwargs['neighbor_mode'] == 'supervised':
k = kwargs['k']
# get true labels and the number of classes
y = kwargs['y']
label = np.unique(y)
n_classes = np.unique(y).size
# construct the weight matrix W in a fisherScore way, W_ij = 1/n_l if yi = yj = l, otherwise W_ij = 0
if kwargs['fisher_score'] is True:
W = lil_matrix((n_samples, n_samples))
for i in range(n_classes):
class_idx = (y == label[i])
class_idx_all = (class_idx[:, np.newaxis] & class_idx[np.newaxis, :])
W[class_idx_all] = 1.0/np.sum(np.sum(class_idx))
return W
# construct the weight matrix W in a reliefF way; NH(x) and NM(x, y) denote the sets of k nearest
# points to x with the same class as x, and with a different class (the class y), respectively. W_ij = 1 if i = j;
# W_ij = 1/k if x_j \in NH(x_i); W_ij = -1/((c-1)*k) if x_j \in NM(x_i, y)
if kwargs['reliefF'] is True:
# when xj in NH(xi)
G = np.zeros((n_samples*(k+1), 3))
id_now = 0
for i in range(n_classes):
class_idx = np.column_stack(np.where(y == label[i]))[:, 0]
D = pairwise_distances(X[class_idx, :])
D **= 2
idx = np.argsort(D, axis=1)
idx_new = idx[:, 0:k+1]
n_smp_class = (class_idx[idx_new[:]]).size
if len(class_idx) <= k:
k = len(class_idx) - 1
G[id_now:n_smp_class+id_now, 0] = np.tile(class_idx, (k+1, 1)).reshape(-1)
G[id_now:n_smp_class+id_now, 1] = np.ravel(class_idx[idx_new[:]], order='F')
G[id_now:n_smp_class+id_now, 2] = 1.0/k
id_now += n_smp_class
W1 = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
# when i = j, W_ij = 1
for i in range(n_samples):
W1[i, i] = 1
# when x_j in NM(x_i, y)
G = np.zeros((n_samples*k*(n_classes - 1), 3))
id_now = 0
for i in range(n_classes):
class_idx1 = np.column_stack(np.where(y == label[i]))[:, 0]
X1 = X[class_idx1, :]
for j in range(n_classes):
if label[j] != label[i]:
class_idx2 = np.column_stack(np.where(y == label[j]))[:, 0]
X2 = X[class_idx2, :]
D = pairwise_distances(X1, X2)
idx = np.argsort(D, axis=1)
idx_new = idx[:, 0:k]
n_smp_class = len(class_idx1)*k
G[id_now:n_smp_class+id_now, 0] = np.tile(class_idx1, (k, 1)).reshape(-1)
G[id_now:n_smp_class+id_now, 1] = np.ravel(class_idx2[idx_new[:]], order='F')
G[id_now:n_smp_class+id_now, 2] = -1.0/((n_classes-1)*k)
id_now += n_smp_class
W2 = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W2) > W2
W2 = W2 - W2.multiply(bigger) + np.transpose(W2).multiply(bigger)
W = W1 + W2
return W
if kwargs['weight_mode'] == 'binary':
if kwargs['metric'] == 'euclidean':
G = np.zeros((n_samples*(k+1), 3))
id_now = 0
for i in range(n_classes):
class_idx = np.column_stack(np.where(y == label[i]))[:, 0]
# compute pairwise euclidean distances for instances in class i
D = pairwise_distances(X[class_idx, :])
D **= 2
# sort the distance matrix D in ascending order for instances in class i
idx = np.argsort(D, axis=1)
idx_new = idx[:, 0:k+1]
n_smp_class = len(class_idx)*(k+1)
G[id_now:n_smp_class+id_now, 0] = np.tile(class_idx, (k+1, 1)).reshape(-1)
G[id_now:n_smp_class+id_now, 1] = np.ravel(class_idx[idx_new[:]], order='F')
G[id_now:n_smp_class+id_now, 2] = 1
id_now += n_smp_class
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
if kwargs['metric'] == 'cosine':
# normalize the data first
X_normalized = np.power(np.sum(X*X, axis=1), 0.5)
for i in range(n_samples):
X[i, :] = X[i, :]/max(1e-12, X_normalized[i])
G = np.zeros((n_samples*(k+1), 3))
id_now = 0
for i in range(n_classes):
class_idx = np.column_stack(np.where(y == label[i]))[:, 0]
# compute pairwise cosine distances for instances in class i
D_cosine = np.dot(X[class_idx, :], np.transpose(X[class_idx, :]))
# sort the distance matrix D in descending order for instances in class i
idx = np.argsort(-D_cosine, axis=1)
idx_new = idx[:, 0:k+1]
n_smp_class = len(class_idx)*(k+1)
G[id_now:n_smp_class+id_now, 0] = np.tile(class_idx, (k+1, 1)).reshape(-1)
G[id_now:n_smp_class+id_now, 1] = np.ravel(class_idx[idx_new[:]], order='F')
G[id_now:n_smp_class+id_now, 2] = 1
id_now += n_smp_class
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
elif kwargs['weight_mode'] == 'heat_kernel':
G = np.zeros((n_samples*(k+1), 3))
id_now = 0
for i in range(n_classes):
class_idx = np.column_stack(np.where(y == label[i]))[:, 0]
# compute pairwise cosine distances for instances in class i
D = pairwise_distances(X[class_idx, :])
D **= 2
# sort the distance matrix D in ascending order for instances in class i
dump = np.sort(D, axis=1)
idx = np.argsort(D, axis=1)
idx_new = idx[:, 0:k+1]
dump_new = dump[:, 0:k+1]
t = kwargs['t']
# compute pairwise heat kernel distances for instances in class i
dump_heat_kernel = np.exp(-dump_new/(2*t*t))
n_smp_class = len(class_idx)*(k+1)
G[id_now:n_smp_class+id_now, 0] = np.tile(class_idx, (k+1, 1)).reshape(-1)
G[id_now:n_smp_class+id_now, 1] = np.ravel(class_idx[idx_new[:]], order='F')
G[id_now:n_smp_class+id_now, 2] = np.ravel(dump_heat_kernel, order='F')
id_now += n_smp_class
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
elif kwargs['weight_mode'] == 'cosine':
# normalize the data first
X_normalized = np.power(np.sum(X*X, axis=1), 0.5)
for i in range(n_samples):
X[i, :] = X[i, :]/max(1e-12, X_normalized[i])
G = np.zeros((n_samples*(k+1), 3))
id_now = 0
for i in range(n_classes):
class_idx = np.column_stack(np.where(y == label[i]))[:, 0]
# compute pairwise cosine distances for instances in class i
D_cosine = np.dot(X[class_idx, :], np.transpose(X[class_idx, :]))
# sort the distance matrix D in descending order for instances in class i
dump = np.sort(-D_cosine, axis=1)
idx = np.argsort(-D_cosine, axis=1)
idx_new = idx[:, 0:k+1]
dump_new = -dump[:, 0:k+1]
n_smp_class = len(class_idx)*(k+1)
G[id_now:n_smp_class+id_now, 0] = np.tile(class_idx, (k+1, 1)).reshape(-1)
G[id_now:n_smp_class+id_now, 1] = np.ravel(class_idx[idx_new[:]], order='F')
G[id_now:n_smp_class+id_now, 2] = np.ravel(dump_new, order='F')
id_now += n_smp_class
# build the sparse affinity matrix W
W = csc_matrix((G[:, 2], (G[:, 0], G[:, 1])), shape=(n_samples, n_samples))
bigger = np.transpose(W) > W
W = W - W.multiply(bigger) + np.transpose(W).multiply(bigger)
return W
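# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates how the kwargs documented above might be passed. The toy data and
# parameter values are invented for demonstration; note that construct_W
# normalizes X in place whenever the cosine metric is used. Runs only as a script.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 5)
    y_demo = np.array([0] * 10 + [1] * 10)
    # default: cosine metric, knn graph (k=5), binary weights
    W_default = construct_W(X_demo)
    # heat-kernel weights on a euclidean knn graph
    W_heat = construct_W(X_demo, metric='euclidean', neighbor_mode='knn',
                         weight_mode='heat_kernel', k=5, t=1.0)
    # supervised graph built from the class labels
    W_sup = construct_W(X_demo, neighbor_mode='supervised', y=y_demo, k=3)
    print(W_default.shape, W_heat.shape, W_sup.shape)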
|
gpl-2.0
|
mlyundin/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
|
bsd-3-clause
|
robertvi/rjv
|
plot_compare_kmers.py
|
1
|
1945
|
#!/usr/bin/python
'''
compare kmer positions between two fasta files
usage: plot_compare_kmers kmersize fasta1 fasta2 output.png
'''
#set matplotlib to use a backend suitable for headless operation
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from rjv.fasta import *
from rjv.kmer import *
import sys
if len(sys.argv) < 5:
print 'usage: plot_compare_kmers kmersize fasta1 fasta2 output.png'
exit(0)
kmersize = int(sys.argv[1])
inp1 = sys.argv[2]
inp2 = sys.argv[3]
out = sys.argv[4]
kmer_posn = {}
inp1_sizes = []
inp2_sizes = []
#index position of all kmers
posn = 0
for fa in next_fasta(inp1):
inp1_sizes.append(len(fa['seq']))
for i,kmer in enumerate(next_kmer(fa['seq'],kmersize)):
try:
#create reverse complement of the kmer
rev = revcomp(kmer)
except:
#invalid base found
continue
#convert to canonical kmer
if rev < kmer: kmer = rev
if not kmer in kmer_posn: kmer_posn[kmer] = []
kmer_posn[kmer].append(posn+i)
posn += len(fa['seq'])
xx = []
yy = []
posn = 0
for fa in next_fasta(inp2):
inp2_sizes.append(len(fa['seq']))
for i,kmer in enumerate(next_kmer(fa['seq'],kmersize)):
try:
#create reverse complement of the kmer
rev = revcomp(kmer)
except:
#invalid base found
continue
#convert to canonical kmer
if rev < kmer: kmer = rev
if not kmer in kmer_posn: continue
for x in kmer_posn[kmer]:
xx.append(x)
yy.append(posn+i)
posn += len(fa['seq'])
plt.plot(xx,yy,'ro',alpha=0.5,markersize=0.1)
posn = 0
for x in inp1_sizes[:-1]:
posn += x
plt.axvline(x=posn)
posn = 0
for x in inp2_sizes[:-1]:
posn += x
plt.axhline(y=posn)
plt.savefig(out, dpi=300, bbox_inches='tight')
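

# --- Illustrative sketch (added; not part of the original script) ---
# The canonical k-mer idea used above, spelled out with plain Python instead
# of rjv.kmer: a k-mer and its reverse complement are treated as the same key
# by always keeping the lexicographically smaller of the two.
def _demo_canonical_kmer(kmer):
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    rev = ''.join(complement[b] for b in reversed(kmer))  # KeyError on invalid bases
    return min(kmer, rev)
# e.g. _demo_canonical_kmer('GAT') == _demo_canonical_kmer('ATC') == 'ATC'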
|
gpl-2.0
|
fengzhyuan/scikit-learn
|
sklearn/manifold/t_sne.py
|
106
|
20057
|
# Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
[-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
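

# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal end-to-end example: embed a tiny random dataset with TSNE and
# score how well local neighborhoods are preserved using the trustworthiness()
# helper defined above. The exact values depend on the random initialization.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(30, 5)
    X_demo_embedded = TSNE(n_components=2, perplexity=5.0,
                           random_state=0).fit_transform(X_demo)
    print("Trustworthiness: %.3f"
          % trustworthiness(X_demo, X_demo_embedded, n_neighbors=5))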
|
bsd-3-clause
|
durcan/phonon_nrecoil
|
simple_access.py
|
1
|
3730
|
from glob import iglob
import ROOT
import rootpy
from rootpy.tree import Cut
from root_numpy import tree2rec
import pandas as pd
from time import time
def expander(
dtype='cf',
tree='rrqDir/calibzip1',
base='/tera2/data3/cdmsbatsProd/R133/dataReleases/Prodv5-3_June2013/merged/',
data='all',
productions=['all'],
cut='',
cutrev=''):
pbase = base + data + '/' + cutrev + dtype + '/' + cut
if 'bg_permitted' in dtype:
prefix = 'blinded_'
else:
prefix = ''
if 'rrqDir/calib' in tree:
fname = prefix + 'calib_Prodv5-3_{}_??.root'
elif 'rqDir' in tree:
fname = prefix + 'merge_Prodv5-3_{}_??.root'
elif 'cutDir' in tree:
fname = cut.rstrip('/') + '_{}_??.root'
if productions == ['all']:
prod = '0[0-9][0-9][0-9][0-9][0-9]?'
ppath = iglob(pbase + fname.format(prod))
else:
ppath = (pbase + fname.format(i) for i in productions)
result = [i + '/' + tree for i in ppath]
result.sort()
return result
def chainer(
dtype='cf',
izip=1,
base='/tera2/data3/cdmsbatsProd/R133/dataReleases/Prodv5-3_June2013/merged/',
data='all',
productions=['all'],
rqs=[],
eventrqs=[],
rrqs=[],
eventrrqs=[],
cuts=[],
eventcuts=[],
selections=[],
cutrev='current',
load_cut_to_ram=False):
# time call
t1 = time()
# deal with data chains
dchain = ROOT.TChain() # initialize data chain
dlist = []
    # initialize first chain with calibevent trees (because they are small)
dpaths = expander(
dtype=dtype,
tree='rrqDir/calibevent',
base=base, data=data,
productions=productions)
map(dchain.Add, dpaths)
# then make a list of chains for the other types
for i, v in {
'rrqDir/calibzip{}': rrqs,
'rqDir/zip{}': rqs,
'rqDir/eventTree': eventrqs}.iteritems():
if len(v) != 0:
tmp = ROOT.TChain()
dpaths = expander(
dtype=dtype,
tree=i.format(izip),
base=base,
productions=productions)
map(tmp.Add, dpaths)
dlist.append(tmp)
# friend each other data tree with the original chain
map(dchain.AddFriend, dlist)
# deal with cuts
clist = {}
for i, v in {
'cutDir/cutzip{}': cuts,
'cutDir/cutevent': eventcuts}.iteritems():
for c in v:
cpaths = expander(
data='cuts',
dtype=dtype,
tree=i.format(izip),
base=base,
productions=productions,
cut=c + '/',
cutrev='current/')
tmp = ROOT.TChain()
#print "cpaths ", cpaths
map(tmp.Add, cpaths)
clist[c] = tmp
#print "adding cuts: ", clist
[dchain.AddFriend(v, k) for k, v in clist.iteritems()]
# build cut selection
cut_string = None
if len(selections) != 0:
cut_string = reduce(
lambda x, y: x & y,
map(
Cut,
selections))
# extract the desired variables from the file turn into a Data Frame
rows = ['SeriesNumber', 'EventNumber']
branches = rrqs+rqs+eventrqs+eventrrqs+rows
if load_cut_to_ram:
branches += cuts+eventcuts
df = pd.pivot_table(
pd.DataFrame(
tree2rec(
dchain,
branches=list(set(branches)),
selection=cut_string)),
rows=rows)
t2 = time()
print "Load time: ", t2-t1, "s"
return df
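

# --- Illustrative usage sketch (added; not part of the original module) ---
# expander() only assembles path strings, so it can be exercised without the
# CDMS data files being present. The production number below is made up
# purely for illustration.
if __name__ == '__main__':
    for p in expander(dtype='cf', tree='rrqDir/calibzip1',
                      productions=['0113041']):
        print(p)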
|
mit
|
UDST/urbanaccess
|
urbanaccess/gtfsfeeds.py
|
1
|
25476
|
import yaml
import pandas as pd
import traceback
import zipfile
import os
import logging as lg
import time
from six.moves.urllib import request
from urbanaccess.utils import log
from urbanaccess import config
# TODO: make class CamelCase
class urbanaccess_gtfsfeeds(object):
"""
A dict of GTFS feeds as {name of GTFS feed or transit service/agency :
URL of feed} to request and
download in the GTFS downloader.
Parameters
----------
gtfs_feeds : dict
        dictionary with the name of the transit service or agency GTFS feed
        as the key (note: this name will be used as the feed folder name;
        if the GTFS feed does not have an agency name in the agency.txt
        file, this key will be used to name the agency) and
the GTFS feed URL as the value to pass to the GTFS downloader as:
{unique name of GTFS feed or transit service/agency : URL of feed}
"""
def __init__(self,
gtfs_feeds={}):
self.gtfs_feeds = gtfs_feeds
@classmethod
def from_yaml(cls, gtfsfeeddir=os.path.join(config.settings.data_folder,
'gtfsfeeds'),
yamlname='gtfsfeeds.yaml'):
"""
Create an urbanaccess_gtfsfeeds instance from a saved YAML.
Parameters
----------
gtfsfeeddir : str, optional
Directory to load a YAML file.
yamlname : str or file like, optional
File name from which to load a YAML file.
Returns
-------
urbanaccess_gtfsfeeds
"""
if not isinstance(gtfsfeeddir, str):
raise ValueError('gtfsfeeddir must be a string')
if not os.path.exists(gtfsfeeddir):
raise ValueError('{} does not exist or was not found'.format(
gtfsfeeddir))
if not isinstance(yamlname, str):
raise ValueError('yaml must be a string')
yaml_file = os.path.join(gtfsfeeddir, yamlname)
with open(yaml_file, 'r') as f:
yaml_config = yaml.safe_load(f)
if not isinstance(yaml_config, dict):
raise ValueError('{} yamlname is not a dict'.format(yamlname))
validkey = 'gtfs_feeds'
if validkey not in yaml_config.keys():
raise ValueError('key gtfs_feeds was not found in YAML file')
for key in yaml_config['gtfs_feeds'].keys():
if not isinstance(key, str):
raise ValueError('{} must be a string'.format(key))
for value in yaml_config['gtfs_feeds'][key]:
if not isinstance(value, str):
raise ValueError('{} must be a string'.format(value))
unique_url_count = len(
pd.DataFrame.from_dict(yaml_config['gtfs_feeds'], orient='index')[
0].unique())
url_count = len(yaml_config['gtfs_feeds'])
if unique_url_count != url_count:
raise ValueError(
                'duplicate feed URL values were found in the gtfs_feeds '
                'dictionary loaded from the YAML file. Feed URL values '
                'must be unique.')
gtfsfeeds = cls(gtfs_feeds=yaml_config.get('gtfs_feeds', {}))
log('{} YAML successfully loaded with {} feeds.'.format(yaml_file, len(
yaml_config['gtfs_feeds'])))
return gtfsfeeds
def to_dict(self):
"""
Return a dict representation of an urbanaccess_gtfsfeeds instance.
"""
return {'gtfs_feeds': self.gtfs_feeds}
def add_feed(self, add_dict, replace=False):
"""
Add a dictionary to the urbanaccess_gtfsfeeds instance.
Parameters
----------
add_dict : dict
Dictionary to add to existing urbanaccess_gtfsfeeds with the name
of the transit service or agency GTFS feed as the key and the
GTFS feed URL as the value to pass to the GTFS downloader
as:
{unique name of GTFS feed or transit service/agency : URL of feed}
replace : bool, optional
If key of dict is already in the UrbanAccess replace
the existing dict value with the value passed
"""
if not isinstance(add_dict, dict):
raise ValueError('add_dict is not a dict')
if not isinstance(replace, bool):
raise ValueError('replace is not bool')
if replace is not True:
for key in add_dict.keys():
if key in self.gtfs_feeds.keys():
raise ValueError(
'{} passed in add_dict already exists in gtfs_feeds. '
'Only unique keys are allowed to be added.'.format(
key))
if not isinstance(key, str):
raise ValueError('{} must be a string'.format(key))
for value in add_dict[key]:
if not isinstance(value, str):
raise ValueError('{} must be a string'.format(value))
for key, value in add_dict.items():
if value in self.gtfs_feeds.values():
raise ValueError('duplicate values were found when the '
'passed add_dict dictionary was added to '
'the existing dictionary. Feed URL '
'values must be unique.')
gtfs_feeds = self.gtfs_feeds.update(add_dict)
else:
for key in add_dict.keys():
if key in self.gtfs_feeds.keys():
log('{} passed in add_dict will replace existing {} feed '
'in gtfs_feeds.'.format(key, key))
if not isinstance(key, str):
raise ValueError('{} must be a string'.format(key))
for value in add_dict[key]:
if not isinstance(value, str):
raise ValueError('{} must be a string'.format(value))
gtfs_feeds = self.gtfs_feeds.update(add_dict)
log('Added {} feeds to gtfs_feeds: {}'.format(len(add_dict), add_dict))
return gtfs_feeds
def remove_feed(self, del_key=None, remove_all=False):
"""
Remove GTFS feeds from the existing urbanaccess_gtfsfeeds instance
Parameters
----------
del_key : str or list, optional
dict keys as a single string or list of
strings to remove from existing
remove_all : bool, optional
if true, remove all keys from existing
urbanaccess_gtfsfeeds instance
"""
if not isinstance(remove_all, bool):
raise ValueError('remove_all is not bool')
if del_key is None and remove_all:
self.gtfs_feeds = {}
log('Removed all feeds from gtfs_feeds')
else:
if not isinstance(del_key, (list, str)):
raise ValueError('del_key must be a string or list of strings')
if remove_all:
raise ValueError(
'remove_all must be False in order to remove individual '
'records: {}'.format(del_key))
            if isinstance(del_key, str):
                del_key = [del_key]
for key in del_key:
if key not in self.gtfs_feeds.keys():
raise ValueError(
'{} key to delete was not found in gtfs_feeds'.format(
key))
del self.gtfs_feeds[key]
log('Removed {} feed from gtfs_feeds'.format(key))
def to_yaml(self, gtfsfeeddir=os.path.join(config.settings.data_folder,
'gtfsfeeds'),
yamlname='gtfsfeeds.yaml',
overwrite=False):
"""
Save an urbanaccess_gtfsfeeds representation to a YAML file.
Parameters
----------
gtfsfeeddir : str, optional
Directory to save a YAML file.
yamlname : str or file like, optional
File name to which to save a YAML file.
overwrite : bool, optional
if true, overwrite an existing same name YAML file in specified
directory
Returns
-------
Nothing
"""
if not isinstance(gtfsfeeddir, str):
raise ValueError('gtfsfeeddir must be a string')
if not os.path.exists(gtfsfeeddir):
log(
'{} does not exist or was not found and will be '
'created'.format(
gtfsfeeddir))
os.makedirs(gtfsfeeddir)
if not isinstance(yamlname, str):
raise ValueError('yaml must be a string')
yaml_file = os.path.join(gtfsfeeddir, yamlname)
if overwrite is False and os.path.isfile(yaml_file) is True:
raise ValueError(
'{} already exists. Rename or turn overwrite to True'.format(
yamlname))
else:
with open(yaml_file, 'w') as f:
yaml.dump(self.to_dict(), f, default_flow_style=False)
log('{} file successfully created'.format(yaml_file))
# instantiate the UrbanAccess GTFS feed object
feeds = urbanaccess_gtfsfeeds()
def search(api='gtfsdataexch', search_text=None, search_field=None,
match='contains', add_feed=False, overwrite_feed=False):
"""
Connect to a GTFS feed repository API and search for GTFS feeds that exist
in a remote GTFS repository and whether or not to add the GTFS feed name
and download URL to the urbanaccess_gtfsfeeds instance.
Currently only supports access to the GTFS Data Exchange API.
Parameters
----------
api : {'gtfsdataexch'}, optional
name of GTFS feed repository to search in. name corresponds to the
dict specified in the urbanacess_config instance. Currently only
supports access to the GTFS Data Exchange repository.
search_text : str, optional
string pattern to search for
search_field : string or list, optional
name of the field or column to search for string
match : {'contains', 'exact'}, optional
search string matching method as either: contains or exact
add_feed : bool, optional
add search results to existing urbanaccess_gtfsfeeds instance using
the name field as the key and the URL as the value
overwrite_feed : bool, optional
If true the existing urbanaccess_gtfsfeeds instance will be replaced
with the records returned in the search results.
All existing records will be removed.
Returns
-------
search_result_df : pandas.DataFrame
Dataframe of search results displaying full feed metadata
"""
log(
'Note: Your use of a GTFS feed is governed by each GTFS feed author '
'license terms. It is suggested you read the respective license '
'terms for the appropriate use of a GTFS feed.',
level=lg.WARNING)
if not isinstance(api, str):
raise ValueError('{} must be a string'.format(api))
if api not in config.settings.gtfs_api.keys():
raise ValueError('{} is not currently a supported API'.format(api))
if config.settings.gtfs_api[api] is None or not isinstance(
config.settings.gtfs_api[api], str):
raise ValueError('{} is not defined or defined incorrectly'.format(
api))
if not isinstance(match, str) or match not in ['contains', 'exact']:
raise ValueError('match must be either: contains or exact')
if not isinstance(add_feed, bool):
raise ValueError('add_feed must be bool')
if api == 'gtfsdataexch':
log(
'Warning: The GTFSDataExchange is no longer being maintained as '
'of Summer 2016. '
'Data accessed here may be out of date.', level=lg.WARNING)
feed_table = pd.read_table(config.settings.gtfs_api[api], sep=',')
feed_table['date_added'] = pd.to_datetime(feed_table['date_added'],
unit='s')
feed_table['date_last_updated'] = pd.to_datetime(
feed_table['date_last_updated'], unit='s')
if search_text is None:
log(
'No search parameters were passed. Returning full list of {} '
'GTFS feeds:'.format(
len(feed_table)))
return feed_table
else:
pass
search_result_df = pd.DataFrame()
if search_field is None:
search_field = ['name', 'url', 'dataexchange_id', 'feed_baseurl']
else:
if not isinstance(search_field, list):
raise ValueError('search_field is not list')
for field in search_field:
if field not in feed_table.columns:
raise ValueError(
'{} column not found in available feed table'.format(
field))
for col in feed_table.select_dtypes(include=[object]).columns:
if isinstance(search_text, str):
search_text = [search_text]
else:
if not isinstance(search_text, list):
raise ValueError('search_text is not list')
for text in search_text:
if match == 'contains':
search_result = feed_table[
feed_table[col].str.contains(text, case=False,
na=False)]
if match == 'exact':
search_result = feed_table[
feed_table[col].str.match(text, case=False,
na=False)]
search_result_df = search_result_df.append(search_result)
search_result_df.drop_duplicates(inplace=True)
log('Found {} records that matched {} inside {} columns:'.format(
len(search_result_df), search_text, search_field))
if len(search_result_df) != 0:
if add_feed:
if overwrite_feed:
zip_url = search_result_df[
'dataexchange_url'] + 'latest.zip'
search_result_df['dataexchange_url'] = zip_url
search_result_dict = search_result_df.set_index('name')[
'dataexchange_url'].to_dict()
feeds.gtfs_feeds = search_result_dict
log(
'Replaced all records in gtfs_feed list with the {} '
'found records:'.format(
len(search_result_df)))
else:
zip_url = search_result_df[
'dataexchange_url'] + 'latest.zip'
search_result_df['dataexchange_url'] = zip_url
search_result_dict = search_result_df.set_index('name')[
'dataexchange_url'].to_dict()
feeds.add_feed(search_result_dict)
log('Added {} records to gtfs_feed list:'.format(
len(search_result_df)))
return search_result_dict
else:
return search_result_df
def download(data_folder=os.path.join(config.settings.data_folder),
feed_name=None, feed_url=None, feed_dict=None,
error_pause_duration=5, delete_zips=False):
"""
Connect to the URLs passed in function or the URLs stored in the
urbanaccess_gtfsfeeds instance and download the GTFS feed zipfile(s)
then unzip inside a local root directory. Resulting GTFS feed text files
will be located in the root folder: gtfsfeed_text unless otherwise
specified
Parameters
----------
data_folder : str, optional
directory to download GTFS feed data to
feed_name : str, optional
name of transit agency or service to use to name downloaded zipfile
feed_url : str, optional
corresponding URL to the feed_name to use to download GTFS feed zipfile
feed_dict : dict, optional
Dictionary specifying the name of the transit service or
agency GTFS feed as the key and the GTFS feed URL as the value:
{unique name of GTFS feed or transit service/agency : URL of feed}
error_pause_duration : int, optional
how long to pause in seconds before re-trying requests if error
delete_zips : bool, optional
if true the downloaded zipfiles will be removed
Returns
-------
nothing
"""
if (feed_name is not None and feed_url is None) or (
feed_url is not None and feed_name is None):
raise ValueError(
'Both feed_name and feed_url parameters are required.')
if feed_name is not None and feed_url is not None:
if feed_dict is not None:
            raise ValueError(
                'feed_dict must be None when feed_name and feed_url are '
                'specified')
if not isinstance(feed_name, str) or not isinstance(feed_url, str):
            raise ValueError('feed_name and feed_url must both be strings')
feeds.gtfs_feeds = {feed_name: feed_url}
elif feed_dict is not None:
if feed_name is not None or feed_url is not None:
            raise ValueError(
                'feed_name and feed_url must be None when feed_dict is '
                'specified')
if not isinstance(feed_dict, dict):
raise ValueError('feed_dict is not dict')
for key in feed_dict.keys():
if not isinstance(key, str):
raise ValueError('{} must be a string'.format(key))
for value in feed_dict[key]:
if not isinstance(value, str):
raise ValueError('{} must be a string'.format(value))
for key, value in feed_dict.items():
if value in feeds.gtfs_feeds.values():
raise ValueError(
                    'duplicate feed URL values were found when the passed '
                    'feed_dict was checked against the existing gtfs_feeds '
                    'dictionary. Feed URL values must be unique.')
feeds.gtfs_feeds = feed_dict
elif feed_name is None and feed_url is None and feed_dict is None:
if len(feeds.gtfs_feeds) == 0:
            raise ValueError(
                'No feeds were found in the existing gtfs_feeds instance')
feeds.gtfs_feeds
else:
raise ValueError('Passed parameters were incorrect or not specified.')
download_folder = os.path.join(data_folder, 'gtfsfeed_zips')
if not os.path.exists(download_folder):
os.makedirs(download_folder)
log('{} does not exist. Directory was created'.format(download_folder))
log('{:,} GTFS feed(s) will be downloaded here: {}'.format(
len(feeds.gtfs_feeds), download_folder))
start_time1 = time.time()
msg_no_connection_w_status = ('Unable to connect. URL at {} returned '
'status code {} and no data')
msg_no_connection = 'Unable to connect to: {}. Error: {}'
msg_download_succeed = ('{} GTFS feed downloaded successfully. '
'Took {:,.2f} seconds for {:,.1f}KB')
# TODO: add file counter and print number to user
for feed_name_key, feed_url_value in feeds.gtfs_feeds.items():
start_time2 = time.time()
zipfile_path = ''.join([download_folder, '/', feed_name_key, '.zip'])
# add default user-agent header in request to avoid 403 Errors
opener = request.build_opener()
opener.addheaders = [('User-agent', '')]
request.install_opener(opener)
if 'http' in feed_url_value:
try:
status_code = request.urlopen(feed_url_value).getcode()
if status_code == 200:
file = request.urlopen(feed_url_value)
_zipfile_type_check(file=file,
feed_url_value=feed_url_value)
with open(zipfile_path, "wb") as local_file:
local_file.write(file.read())
log(msg_download_succeed.format(
feed_name_key, time.time() - start_time2,
os.path.getsize(zipfile_path)))
elif status_code in [429, 504]:
msg = ('URL at {} returned status code {} and no data. '
'Re-trying request in {:.2f} seconds.')
log(msg.format(feed_url_value, status_code,
error_pause_duration),
level=lg.WARNING)
time.sleep(error_pause_duration)
try:
file = request.urlopen(feed_url_value)
_zipfile_type_check(file=file,
feed_url_value=feed_url_value)
with open(zipfile_path, "wb") as local_file:
local_file.write(file.read())
except Exception:
log(msg_no_connection_w_status.format(
feed_url_value, status_code),
level=lg.ERROR)
else:
log(msg_no_connection_w_status.format(
feed_url_value, status_code),
level=lg.ERROR)
except Exception:
log(msg_no_connection.format(
feed_url_value, traceback.format_exc()),
level=lg.ERROR)
else:
try:
file = request.urlopen(feed_url_value)
_zipfile_type_check(file=file,
feed_url_value=feed_url_value)
file_path = ''.join(
[download_folder, '/', feed_name_key, '.zip'])
with open(file_path, "wb") as local_file:
local_file.write(file.read())
log(msg_download_succeed.format(
feed_name_key, time.time() - start_time2,
os.path.getsize(zipfile_path)))
except Exception:
log(msg_no_connection.format(
feed_url_value, traceback.format_exc()),
level=lg.ERROR)
log('GTFS feed download completed. Took {:,.2f} seconds'.format(
time.time() - start_time1))
_unzip(zip_rootpath=download_folder, delete_zips=delete_zips)
def _unzip(zip_rootpath, delete_zips=True):
"""
unzip all GTFS feed zipfiles in a root directory with resulting text files
in the root folder: gtfsfeed_text
Parameters
----------
zip_rootpath : string
root directory to place downloaded GTFS feed zipfiles
delete_zips : bool, optional
if true the downloaded zipfiles will be removed
Returns
-------
nothing
"""
start_time = time.time()
unzip_rootpath = os.path.join(os.path.dirname(zip_rootpath),
'gtfsfeed_text')
if not os.path.exists(unzip_rootpath):
os.makedirs(unzip_rootpath)
log('{} does not exist. Directory was created'.format(unzip_rootpath))
zipfilelist = [zipfilename for zipfilename in os.listdir(zip_rootpath) if
zipfilename.endswith(".zip")]
if len(zipfilelist) == 0:
raise ValueError('No zipfiles were found in specified '
'directory: {}'.format(zip_rootpath))
for zfile in zipfilelist:
with zipfile.ZipFile(os.path.join(zip_rootpath, zfile)) as z:
# required to deal with zipfiles that have subdirectories and
# that were created on OSX
filelist = [file for file in z.namelist() if
file.endswith(".txt") and not file.startswith(
"__MACOSX")]
if not os.path.exists(
os.path.join(unzip_rootpath, zfile.replace('.zip', ''))):
os.makedirs(
os.path.join(unzip_rootpath, zfile.replace('.zip', '')))
for file in filelist:
with open(
os.path.join(unzip_rootpath, zfile.replace('.zip', ''),
os.path.basename(file)), 'wb') as f:
f.write(z.read(file))
f.close()
z.close()
log('{} successfully extracted to: {}'.format(zfile, os.path.join(
unzip_rootpath, zfile.replace('.zip', ''))))
    if delete_zips:
        for zfile in zipfilelist:
            os.remove(os.path.join(zip_rootpath, zfile))
        log('Deleted zipfiles from {} folder'.format(zip_rootpath))
log(
'GTFS feed zipfile extraction completed. Took {:,.2f} seconds for {} '
'files'.format(
time.time() - start_time, len(zipfilelist)))
def _zipfile_type_check(file, feed_url_value):
"""
zipfile format checker helper
Parameters
----------
file : addinfourl
loaded zipfile object in memory
feed_url_value : str
URL to download GTFS feed zipfile
Returns
-------
nothing
"""
    content_type = file.info().get('Content-Type') or ''
    if 'zip' not in content_type and 'octet' not in content_type:
raise ValueError(
'data requested at {} is not a zipfile. '
'Data must be a zipfile'.format(feed_url_value))
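

# --- Illustrative usage sketch (added; not part of the original module) ---
# Typical use of the module-level ``feeds`` object defined above. The feed
# name and URL below are placeholders, not a real GTFS feed.
if __name__ == '__main__':
    feeds.add_feed(add_dict={'example agency': 'http://example.com/gtfs.zip'})
    print(feeds.to_dict())
    # download() would then fetch and unzip every registered feed, e.g.:
    # download(data_folder='./data', delete_zips=False)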
|
agpl-3.0
|
LukeB89/LamBotics-Final-Codes
|
ColourDTCThread.py
|
1
|
3763
|
#!/usr/bin/python
from picamera.array import PiRGBArray
from matplotlib import pyplot as plt
from picamera import PiCamera
import DynamicObjectV2
import numpy as np
import webcolors
import os.path
import time
import cv2
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
Obj = DynamicObjectV2.Class
widths = 440
heigths = 280
resX = 6
resY = 6
count = 0
imc = 0
hue = 0
sat = 0
val = 0
camera = PiCamera()
camera.resolution = (widths, heigths)
camera.framerate = 32
camera.hflip = True
rawCapture = PiRGBArray(camera, size=(widths, heigths))
time.sleep(0.1)
def dec_conv(x):
return format(x, '03d')
def init(self):
# put your self.registerOutput here
    self.registerOutput("colourDTC", Obj("R",0,"G",0,"B",0,"NewColour",True,"Working", False))
def run (self):
# put your init and global variables here
# main loop
while 1:
oldRGB = [0,0,0]
newRGB = [0,0,0]
# capture frames from the camera
for image in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
frame = image.array
size = 20
mag = 0.5
x = (widths/2)- size
y = (heigths/2) - size
            w = (widths/2) + size
            h = (heigths/2) + size
# Sets up the Image for processing and display
blr = cv2.blur(frame,(10,10))
cv2.rectangle(frame,(x,y),(w,h),(255,0,0),1)
cv2.line(frame, (x,y),(w,h),(255,0,0),1)
cv2.line(frame, (x,h),(w,y),(255,0,0),1)
cv2.circle(frame, (220, 140),2,(0,255,0),2)
            # Masks an area so that the colour detection only views
            # a small section of the screen
maskd = np.zeros(blr.shape[:2], np.uint8)
maskd[130:150, 210:230] = 255
# Applies the mask to the image, Calculates the mean of area
# returns three values, Red, Green and Blue
con = cv2.mean(blr,mask = maskd)
Red = int(con[2])
Gre = int(con[1])
Blu = int(con[0])
#Displaying Values
cv2.putText(frame,"Red=(%r)" % Red, (1,20), cv2.FONT_HERSHEY_SIMPLEX, mag, (0,255,0), 2)
cv2.putText(frame,"Green=(%r)" % Gre, (widths/3,20), cv2.FONT_HERSHEY_SIMPLEX, mag, (0,255,0), 2)
cv2.putText(frame,"Blue=(%r)" % Blu, (2*widths/3,20), cv2.FONT_HERSHEY_SIMPLEX, mag, (0,255,0), 2)
# Control to stop repeat sending
newRGB = [Red,Gre,Blu]
if(newRGB != oldRGB):
oldRGB = newRGB
self.output("colourDTC",Obj("R",None,"G",None,"B",None,"NewColour",False,"Working", True))
self.output("colourDTC",Obj("R",Red,"G",Gre,"B",Blu,"NewColour",True,"Working",True))
#Displaying the image
cv2.imwrite("save.png", frame)
new = cv2.imread("save.png")
cv2.imshow('frame', new)
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# Check for keypresses
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
print "Q Pressed"
break
print "Quitting..."
'''cam.release()'''
break
cv2.destroyAllWindows()
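

# --- Illustrative sketch (added; not part of the original script) ---
# The masked-mean technique used in run() above, demonstrated on a synthetic
# image so it can run without a Pi camera: cv2.mean() averages only the
# pixels where the mask is non-zero and returns (B, G, R, alpha).
def _demo_masked_mean():
    img = np.zeros((280, 440, 3), np.uint8)
    img[130:150, 210:230] = (20, 120, 220)      # paint the probed region (BGR)
    mask = np.zeros(img.shape[:2], np.uint8)
    mask[130:150, 210:230] = 255                # same window as the live code
    b, g, r, _ = cv2.mean(img, mask=mask)
    return int(r), int(g), int(b)               # -> (220, 120, 20)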
|
mit
|
ekostat/ekostat_calculator
|
test_files/lv_notebook_workspace.py
|
1
|
4602
|
# coding: utf-8
# In[1]:
# Reload when code changed:
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
get_ipython().magic('pwd')
# In[2]:
import os
import core
import importlib
importlib.reload(core)
import pandas as pd
pd.__version__
# ### Load directories
# In[3]:
root_directory = os.getcwd()
workspace_directory = root_directory + '/workspaces'
resource_directory = root_directory + '/resources'
# # LOAD WORKSPACES
# ### Load default workspace
# In[4]:
default_workspace = core.WorkSpace(name='default',
parent_directory=workspace_directory,
resource_directory=resource_directory)
# ### Add new workspace
# In[5]:
lv_workspace = core.WorkSpace(name='lv',
parent_directory=workspace_directory,
resource_directory=resource_directory)
# ### Copy files from default workspace to make a clone
# In[6]:
lv_workspace.add_files_from_workspace(default_workspace, overwrite=True)
# ### Load all data in lv_workspace
# In[7]:
lv_workspace.load_all_data()
# # Set first filter and load filtered data
# ### Set first data filter
# In[8]:
# show available waterbodies
workspace_data = lv_workspace.data_handler.get_all_column_data_df()
lst = workspace_data.WATER_TYPE_AREA.unique()
print('Type Areas in dataset:\n{}'.format('\n'.join(lst)))
# In[9]:
lst = workspace_data.SEA_AREA_NAME.unique()
print('Waterbodies in dataset:\n{}'.format('\n'.join(lst)))
# In[10]:
include_WB = ['Gullmarn centralbassäng', 'Rivö fjord', 'Byfjorden', 'Havstensfjorden']
include_stations = []
exclude_stations = []
include_years = ['2015', '2017']
lv_workspace.set_data_filter(step=0, filter_type='include_list', filter_name='SEA_AREA_NAME', data=include_WB)
lv_workspace.set_data_filter(step=0, filter_type='include_list', filter_name='STATN', data=include_stations)
lv_workspace.set_data_filter(step=0, filter_type='exclude_list', filter_name='STATN', data=exclude_stations)
lv_workspace.set_data_filter(step=0, filter_type='include_list', filter_name='MYEAR', data=include_years)
# ### Apply first data filter
# In[11]:
lv_workspace.apply_first_filter() # This sets the first level of data filter in the IndexHandler
# ### Extract filtered data
# In[12]:
data_after_first_filter = lv_workspace.get_filtered_data(level=0) # level=0 means first filter
print('{} rows matching the filter criteria'.format(len(data_after_first_filter)))
data_after_first_filter.head()
data_after_first_filter.shape
# # Set subset filter and load subset data
# ### Set subset filter
# In[13]:
include_WB = ['Gullmarn centralbassäng', 'Rivö fjord']
include_stations = ['BJÖRKHOLMEN']
# Add something that can also pull in stations from neighbouring WBs?
exclude_stations = ['SLÄGGÖ'] # Example that both include and exclude are possible
include_years = ['2016', '2017']
lv_workspace.set_data_filter(step=1, subset='A', filter_type='include_list', filter_name='SEA_AREA_NAME', data=include_WB)
lv_workspace.set_data_filter(step=1, subset='A', filter_type='include_list', filter_name='STATN', data=include_stations)
lv_workspace.set_data_filter(step=1, subset='A', filter_type='exclude_list', filter_name='STATN', data=exclude_stations)
lv_workspace.set_data_filter(step=1, subset='A', filter_type='include_list', filter_name='MYEAR', data=include_years)
# In[ ]:
# ### Apply subset filter
# In[14]:
lv_workspace.apply_subset_filter(subset='A') # Not handled properly by the IndexHandler
lv_workspace.initiate_quality_factors()
#tolerance_filter_file_path = u'D:/Utveckling/GitHub/ekostat_calculator/resources/filters/tolerance_filter_template.txt'
#tolerance_filter = core.ToleranceFilter('test_tolerance_filter', file_path=tolerance_filter_file_path)
#
#lv_workspace.quality_factor_NP.calculate_quality_factor(tolerance_filter)
# ### Extract filtered data
data_after_subset_filter = lv_workspace.get_filtered_data(level=1, subset='A') # level=1 means subset filter
print('{} rows matching the filter criteria'.format(len(data_after_subset_filter)))
data_after_subset_filter.head()
data_after_subset_filter.shape
import numpy as np
np.where(lv_workspace.index_handler.subset_filter)
f = lv_workspace.get_data_filter_object(step=1, subset='A')
f.all_filters
f.exclude_list_filter
f.include_list_filter
#s = lv_workspace.get_step_1_object('A')
#
#s.data_filter.all_filters
#
#
#f0 = lv_workspace.get_data_filter_object(step=0)
#
#
#f0.exclude_list_filter
#
#f0.include_list_filter
|
mit
|
BhallaLab/moose-full
|
moose-examples/paper-2015/Fig5_CellMultiscale/Fig5BCD.py
|
2
|
11428
|
########################################################################
# This program is copyright (c) Upinder S. Bhalla, NCBS, 2015.
# It is licenced under the GPL 2.1 or higher.
# There is no warranty of any kind. You are welcome to make copies under
# the provisions of the GPL.
# This programme illustrates building a panel of multiscale models to
# test neuronal plasticity in different contexts. The simulation is set
# to settle for 5 seconds, then a 2 second tetanus is delivered, then
# the simulation continues for another 50 seconds.
# By default we set it to run the smallest model, that takes about 4 minutes
# to run 57 seconds of simulation time, on an Intel core I7 at
# 2.2 GHz. The big model, VHC-neuron, takes almost 90 minutes.
# This program dumps data to text files for further analysis.
########################################################################
import moogli
import numpy
import time
import pylab
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
sys.path.append('/home/bhalla/moose/trunk/Demos/util')
import rdesigneur as rd
PI = 3.14159265359
useGssa = True
combineSegments = False
#### Choose your favourite model here. #################
elecFileNames = ( "ca1_minimal.p", )
#elecFileNames = ( "ca1_minimal.p", "h10.CNG.swc" )
#elecFileNames = ( "CA1.morph.xml", "ca1_minimal.p", "VHC-neuron.CNG.swc", "h10.CNG.swc" )
synSpineList = []
synDendList = []
probeInterval = 0.1
probeAmplitude = 1.0
tetanusFrequency = 100.0
tetanusAmplitude = 1000
tetanusAmplitudeForSpines = 1000
baselineTime = 5
tetTime = 2
postTetTime = 50
def buildRdesigneur():
##################################################################
# Here we define which prototypes are to be loaded in to the system.
# Each specification has the format
# source [localName]
# source can be any of
# filename.extension, # Identify type of file by extension, load it.
# function(), # func( name ) builds object of specified name
# file.py:function() , # load Python file, run function(name) in it.
# moose.Classname # Make obj moose.Classname, assign to name.
# path # Already loaded into library or on path.
# After loading the prototypes, there should be an object called 'name'
# in the library.
##################################################################
chanProto = [
['./chans/hd.xml'], \
['./chans/kap.xml'], \
['./chans/kad.xml'], \
['./chans/kdr.xml'], \
['./chans/na3.xml'], \
['./chans/nax.xml'], \
['./chans/CaConc.xml'], \
['./chans/Ca.xml'], \
['./chans/NMDA.xml'], \
['./chans/Glu.xml'] \
]
spineProto = [ \
['makeSpineProto()', 'spine' ]
]
chemProto = [ \
['./chem/' + 'psd53.g', 'ltpModel'] \
]
##################################################################
# Here we define what goes where, and any parameters. Each distribution
# has the format
# protoName, path, field, expr, [field, expr]...
# where
# protoName identifies the prototype to be placed on the cell
# path is a MOOSE wildcard path specifying where to put things
# field is the field to assign.
# expr is a math expression to define field value. This uses the
# muParser. Built-in variables are p, g, L, len, dia.
# The muParser provides most math functions, and the Heaviside
# function H(x) = 1 for x > 0 is also provided.
##################################################################
passiveDistrib = [
[ ".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5", \
"Em", "-58e-3", "initVm", "-65e-3" ], \
[ ".", "#axon#", "RA", "0.5" ] \
]
chanDistrib = [ \
["hd", "#dend#,#apical#", "Gbar", "5e-2*(1+(p*3e4))" ], \
["kdr", "#", "Gbar", "p < 50e-6 ? 500 : 100" ], \
["na3", "#soma#,#dend#,#apical#", "Gbar", "250" ], \
["nax", "#soma#,#axon#", "Gbar", "1250" ], \
["kap", "#axon#,#soma#", "Gbar", "300" ], \
["kap", "#dend#,#apical#", "Gbar", \
"300*(H(100-p*1e6)) * (1+(p*1e4))" ], \
["Ca_conc", "#dend#,#apical#", "tau", "0.0133" ], \
["kad", "#soma#,#dend#,#apical#", "Gbar", \
"300*H(p - 100e-6)*(1+p*1e4)" ], \
["Ca", "#dend#,#apical#", "Gbar", "50" ], \
["glu", "#dend#,#apical#", "Gbar", "200*H(p-200e-6)" ], \
["NMDA", "#dend#,#apical#", "Gbar", "2*H(p-200e-6)" ] \
]
spineDistrib = [ \
["spine", '#apical#', "spineSpacing", "20e-6", \
"spineSpacingDistrib", "2e-6", \
"angle", "0", \
"angleDistrib", str( 2*PI ), \
"size", "1", \
"sizeDistrib", "0.5" ] \
]
chemDistrib = [ \
[ "ltpModel", "#apical#", "install", "1"]
]
######################################################################
# Here we define the mappings across scales. Format:
# sourceObj sourceField destObj destField couplingExpr [wildcard][spatialExpn]
# where the coupling expression is anything a muParser can evaluate,
# using the input variable x. For example: 8e-5 + 300*x
# For now, let's use existing adaptors which take an offset and scale.
######################################################################
adaptorList = [
[ 'Ca_conc', 'Ca', 'psd/Ca_input', 'concInit', 8e-5, 1 ],
[ 'Ca_conc', 'Ca', 'dend/Ca_dend_input', 'concInit', 8e-5, 1 ],
[ 'psd/tot_PSD_R', 'n', 'glu', 'Gbar', 0, 0.01 ],
]
######################################################################
# Having defined everything, now to create the rdesigneur and proceed
# with creating the model.
######################################################################
rdes = rd.rdesigneur(
useGssa = useGssa, \
combineSegments = combineSegments, \
stealCellFromLibrary = True, \
passiveDistrib = passiveDistrib, \
spineDistrib = spineDistrib, \
chanDistrib = chanDistrib, \
chemDistrib = chemDistrib, \
spineProto = spineProto, \
chanProto = chanProto, \
chemProto = chemProto, \
adaptorList = adaptorList
)
return rdes
def buildPlots( rdes ):
numPlots = 10
caPsd = moose.vec( '/model/chem/psd/Ca' )
caHead = moose.vec( '/model/chem/spine/Ca' )
psdR = moose.vec( '/model/chem/psd/tot_PSD_R' )
numSpines = rdes.spineCompt.mesh.num
assert( 2 * numSpines == len( rdes.spineComptElist ) )
if not moose.exists( '/graphs' ):
moose.Neutral( '/graphs' )
assert( len( caPsd ) == numSpines )
assert( len( caHead ) == numSpines )
if numSpines < numPlots:
caPsdTab = moose.Table2( '/graphs/caPsdTab', numSpines ).vec
caHeadTab = moose.Table2( '/graphs/caHeadTab', numSpines ).vec
psdRtab = moose.Table2( '/graphs/psdRtab', numSpines ).vec
for i in range( numSpines ):
moose.connect( caPsdTab[i], 'requestOut', caPsd[i], 'getConc' )
moose.connect( caHeadTab[i], 'requestOut', caHead[i], 'getConc')
moose.connect( psdRtab[i], 'requestOut', psdR[i], 'getN' )
else:
caPsdTab = moose.Table2( '/graphs/caPsdTab', numPlots ).vec
caHeadTab = moose.Table2( '/graphs/caHeadTab', numPlots ).vec
psdRtab = moose.Table2( '/graphs/psdRtab', numPlots ).vec
dx = numSpines / numPlots
for i in range( numPlots ):
moose.connect( caPsdTab[i], 'requestOut', caPsd[i*dx], 'getConc' )
moose.connect( caHeadTab[i], 'requestOut', caHead[i*dx], 'getConc' )
moose.connect( psdRtab[i], 'requestOut', psdR[i*dx], 'getN' )
vtab = moose.Table( '/graphs/vtab' )
moose.connect( vtab, 'requestOut', rdes.soma, 'getVm' )
eSpineCaTab = moose.Table( '/graphs/eSpineCaTab' )
path = rdes.spineComptElist[1].path + "/Ca_conc"
moose.connect( eSpineCaTab, 'requestOut', path, 'getCa' )
eSpineVmTab = moose.Table( '/graphs/eSpineVmTab' )
moose.connect( eSpineVmTab, 'requestOut', rdes.spineComptElist[1], 'getVm' )
eSpineGkTab = moose.Table( '/graphs/eSpineGkTab' )
path = rdes.spineComptElist[1].path + "/NMDA"
moose.connect( eSpineGkTab, 'requestOut', path, 'getGk' )
def saveAndClearPlots( name ):
print 'saveAndClearPlots( ', name, ' )'
for i in moose.wildcardFind( "/graphs/#" ):
#plot stuff
i.xplot( name + '.xplot', i.name )
moose.delete( "/graphs" )
def printPsd( name ):
# Print the vol, the path dist from soma, the electrotonic dist, and N
psdR = moose.vec( '/model/chem/psd/tot_PSD_R' )
neuronVoxel = moose.element( '/model/chem/spine' ).neuronVoxel
elecComptMap = moose.element( '/model/chem/dend' ).elecComptMap
print "len( neuronVoxel = ", len( neuronVoxel), min( neuronVoxel), max( neuronVoxel)
print len( elecComptMap), elecComptMap[0], elecComptMap[12]
neuron = moose.element( '/model/elec' )
ncompts = neuron.compartments
d = {}
j = 0
for i in ncompts:
#print i
d[i] = j
j += 1
f = open( name + ".txt", 'w' )
for i in range( len( psdR ) ):
n = psdR[i].n
conc = psdR[i].conc
vol = psdR[i].volume
compt = elecComptMap[ neuronVoxel[i] ]
#print compt
segIndex = d[compt[0]]
p = neuron.geometricalDistanceFromSoma[ segIndex ]
L = neuron.electrotonicDistanceFromSoma[ segIndex ]
s = str( i ) + " " + str(n) + " " + str( conc ) + " " + str(p) + " " + str(L) + "\n"
f.write( s )
f.close()
def probeStimulus( time ):
for t in numpy.arange( 0, time, probeInterval ):
moose.start( probeInterval )
for i in synSpineList:
i.activation( probeAmplitude )
def tetanicStimulus( time ):
tetInterval = 1.0/tetanusFrequency
for t in numpy.arange( 0, time, tetInterval ):
moose.start( tetInterval )
for i in synDendList:
i.activation( tetanusAmplitude )
for i in synSpineList:
i.activation( tetanusAmplitudeForSpines )
def main():
global synSpineList
global synDendList
numpy.random.seed( 1234 )
rdes = buildRdesigneur()
for i in elecFileNames:
print i
rdes.cellProtoList = [ ['./cells/' + i, 'elec'] ]
rdes.buildModel( '/model' )
assert( moose.exists( '/model' ) )
synSpineList = moose.wildcardFind( "/model/elec/#head#/glu,/model/elec/#head#/NMDA" )
temp = set( moose.wildcardFind( "/model/elec/#/glu,/model/elec/#/NMDA" ) )
synDendList = list( temp - set( synSpineList ) )
moose.reinit()
buildPlots( rdes )
# Run for baseline, tetanus, and post-tetanic settling time
t1 = time.time()
probeStimulus( baselineTime )
tetanicStimulus( tetTime )
probeStimulus( postTetTime )
print 'real time = ', time.time() - t1
printPsd( i + ".fig5" )
saveAndClearPlots( i + ".fig5" )
moose.delete( '/model' )
rdes.elecid = moose.element( '/' )
if __name__ == '__main__':
main()
|
gpl-2.0
|
INM-6/swan
|
swan/widgets/matplotlib_widget.py
|
1
|
9139
|
"""
Created on Wed Feb, 2013
@author: Christoph Gollan
In this module you can find the :class:`MatplotlibWidget` which is
a matplotlib to PyQt aggregation widget.
To achieve that a :class:`MplCanvas` is needed. This class puts
a matplotlib figure to a canvas (which is already a PyQt
aggregation). So the figure can be used like the normal matplotlib
figure.
On default, the figure will not have a :class:`NavigationToolbar`,
but you can activate it.
"""
from PyQt5 import QtGui, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar2QTAgg
import matplotlib
# matplotlib.use('Qt5Agg')
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
class MplCanvas(FigureCanvasQTAgg):
"""
A class to get a matplotlib figure on a canvas.
"""
def __init__(self):
"""
"""
# create the figure on which the plots can be added
self.fig = Figure(facecolor="white")
# add this figure to a canvas
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
class NavigationToolbar(NavigationToolbar2QTAgg):
"""
A NavigationToolbar extended with some functions for using
in a PyQt aggregation.
**Arguments**
*plotCanvas* (:class:`src.matplotlibwidget.MplCanvas`):
The canvas the toolbar belongs to.
*parent* (:class:`PyQt5.QtWidgets.QWidget`):
The parent object for the toolbar.
*custom_actions* (boolean):
If true, some custom actions will be created on the toolbar.
"""
def __init__(self, plotCanvas, parent, custom_actions=True):
"""
**Properties**
*_firstChange* (boolean):
Whether or not the y limits of the plots were
changed before.
*_plot_params* (dictionary):
The plot parameters given as (key, value) pairs.
"""
NavigationToolbar2QTAgg.__init__(self, plotCanvas, parent)
# properties {
self._firstChange = True
self._plot_params = {"xlim": [0, 38],
"ylim": [-300, 300]}
# }
# custom actions {
if custom_actions:
self.addSeparator()
self.action_reset = self.addAction("Reset", self.onReset)
self.action_reset.setToolTip("Reset the y limits")
self.action_plus = self.addAction("+", self.onPlus)
self.action_plus.setToolTip("Expand the y limits")
self.action_minus = self.addAction("-", self.onMinus)
self.action_minus.setToolTip("Reduce the y limits")
# }
#### General methods ####
def add_action(self, name, handle):
"""
Adds a custom action to the toolbar.
**Arguments**
*name* (string):
The name of the action.
*handle* (method):
The python method that should be called
if you click on the action.
**Returns**
*action* (:class:`PyQt5.QtWidgets.QAction`):
The action that was added to the toolbar.
"""
return self.addAction(name, handle)
def remove_custom_actions(self):
"""
Removes the initial custom actions from the toolbar.
"""
self.removeAction(self.action_reset)
self.removeAction(self.action_plus)
self.removeAction(self.action_minus)
def remove_actions(self, actions):
"""
Removes default actions from the toolbar.
**Arguments**
*actions* (list of integer):
The indexes of the actions that should be
removed.
"""
for i in actions:
self.removeAction(self.actions()[i])
#### Action handler ####
def onReset(self):
"""
Resets the ylim of all subplots.
"""
if not self._firstChange:
axes_list = self.canvas.fig.get_axes()
for ax in axes_list:
limx = self._plot_params['xlim']
ax.set_xlim(limx[0], limx[1])
limy = self._plot_params['ylim']
ax.set_ylim(limy[0], limy[1])
self.canvas.draw()
def onPlus(self, step=1.0):
"""
Expands the ylim of all subplots.
"""
axes_list = self.canvas.fig.get_axes()
for ax in axes_list:
limy = ax.get_ylim()
limx = ax.get_xlim()
rat = abs((limy[1] - limy[0]) / 2) / abs((limx[1] - limx[0]) / 2)
if self._firstChange:
self._plot_params["ylim"] = limy
self._plot_params["xlim"] = limx
self._firstChange = False
ax.set_ylim(limy[0] / rat, limy[1] / rat)
ax.set_xlim(limx[0] / rat, limx[1] / rat)
self.canvas.draw()
def onMinus(self, step=1.0):
"""
Reduces the ylim of all subplots.
"""
axes_list = self.canvas.fig.get_axes()
for ax in axes_list:
limy = ax.get_ylim()
limx = ax.get_xlim()
rat = abs((limy[1] - limy[0]) / 2) / abs((limx[1] - limx[0]) / 2)
if self._firstChange:
self._plot_params["ylim"] = limy
self._plot_params["xlim"] = limx
self._firstChange = False
ax.set_ylim(limy[0] * rat, limy[1] * rat)
ax.set_xlim(limx[0] * rat, limx[1] * rat)
self.canvas.draw()
class MatplotlibWidget(QtWidgets.QWidget):
"""
A class to have a PyQt widget with a matplotlib figure on it.
**Arguments**
*parent* (:class:`PyQt5.QtWidgets.QWidget` or None):
The parent of this widget.
Default: None
"""
def __init__(self, parent=None, c_actions=False):
"""
"""
QtWidgets.QWidget.__init__(self, parent=parent)
vgl = QtWidgets.QGridLayout(self)
self.vgl = vgl
self.canvas = MplCanvas()
self.naviBar = NavigationToolbar(self.canvas, self, custom_actions=c_actions)
self.naviBar.hide()
vgl.addWidget(self.canvas, 0, 0, 1, 1)
vgl.addWidget(self.naviBar, 1, 0, 1, 1)
self.setLayout(vgl)
# properties {
# }
#### General methods ####
def setup(self, shape=(1, 1), naviBar=True, proj3d=False):
"""
Sets up the widget.
The shape format is: *(rows, cols)*.
Set *naviBar* to *True* to activate the :class:`NavigationToolbar`.
**Arguments**
*shape* (tuple of integer):
The shape of the plot grid.
*naviBar* (boolean):
Whether or not you want to have the tool bar enabled.
*proj3d* (boolean):
Whether or not you want to have a 3d plot instead of a 2d plot.
"""
self._grid(shape, proj3d)
if naviBar:
self.naviBar.show()
def _grid(self, shape, proj3d):
"""
Creates a plot grid.
**Arguments**
*shape* (tuple of integer):
The shape of the plot grid.
*proj3d* (boolean):
Whether or not there will be a 3d plot instead of a 2d plot.
"""
m = shape[0]
n = shape[1]
if proj3d:
for i in range(1, n * m + 1):
self.canvas.fig.add_subplot(m, n, i, projection='3d')
else:
for i in range(1, n * m + 1):
self.canvas.fig.add_subplot(m, n, i)
def get_axes(self):
"""
Wrapper for getting the axes.
**Returns**: list of :class:`matplotlib.axes.Axes` or list of :class:`mpl_toolkits.mplot3d.Axes3d`
All plots on the grid.
"""
return self.canvas.fig.get_axes()
def draw(self):
"""
Wrapper for the draw function. Draws everything.
"""
self.canvas.draw()
def pca_draw(self, axis, patchCollection, kwargs):
"""
Wrapper for the drawing the PCA. Draws only the patch collection (scatter
plot) and the background (ax.patch).
"""
axis.draw_artist(axis.patch)
axis.draw_artist(patchCollection)
self.canvas.fig.canvas.update()
self.canvas.fig.canvas.flush_events()
for k, v in kwargs.items():
getattr(axis, str(k))(v)
def clear_and_reset_axes(self, grid=True, tick_params={"labelsize": 7, }, **kwargs):
"""
Clears the axes and sets the parameter for the axes
because otherwise they are lost.
If *kwargs* is set, for every entry *<key>(<value>)* will be called.
If not, some default *kwargs* will be set.
**Arguments**
*grid* (boolean):
Whether or not you want a grid on your plot.
*tick_params* (dictionary):
A dictionary containing tick parameters.
"""
axes_list = self.canvas.fig.get_axes()
if not kwargs:
kwargs = {"set_ylim": (-150, 150),
"set_xlabel": "time",
"set_ylabel": "voltage"}
for ax in axes_list:
ax.cla()
ax.grid(grid)
ax.tick_params(**tick_params)
for k, v in kwargs.items():
getattr(ax, str(k))(v)
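# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal, hedged example of how MatplotlibWidget is typically wired up,
# assuming a Qt event loop is available; the variable names below are
# hypothetical and only meant to show the setup/get_axes/draw flow.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    widget = MatplotlibWidget(c_actions=True)
    widget.setup(shape=(1, 1), naviBar=True)   # one 2d subplot plus toolbar
    ax = widget.get_axes()[0]
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9])        # draw something simple
    widget.draw()
    widget.show()
    sys.exit(app.exec_())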
|
bsd-3-clause
|
mcneela/Retina
|
setup.py
|
1
|
2265
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='retina',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='A tool for creating dynamic, interactive scientific visualizations.',
long_description=long_description,
url='https://github.com/mcneela/retina.git',
author='Daniel McNeela',
author_email='[email protected]',
# Choose your license
license='Copyright 2016, Daniel McNeela. All rights reserved.',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
# Pick your license as you wish (should match "license" above)
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Scientific/Engineering :: Mathematics',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux'
],
keywords='scientific visualization, machine learning, matplotlib, dynamical systems, research, teaching',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[
'matplotlib>=1.5.0',
'scikit-learn>=0.17',
'pydstool>=0.90'
],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
})
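# --- Usage note (illustrative; not part of the original setup script) ---
# With this setup.py in place the package can typically be installed from the
# project root with `pip install .`, or in editable/development mode with
# `pip install -e .`.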
|
bsd-3-clause
|
liyu1990/sklearn
|
sklearn/metrics/tests/test_classification.py
|
20
|
50188
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
# No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic; the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# separated from the 1 values, so it appears that we've
# correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
# same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions is not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
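# --- Worked example (illustrative; not part of the original test module) ---
# A minimal, self-checking sketch of the MCC formula exercised in
# test_confusion_matrix_binary, relying on the numpy/sklearn imports at the
# top of this module; the toy labels below are hypothetical.
if __name__ == "__main__":
    y_t = np.array([1, 1, 0, 0, 1, 0])
    y_p = np.array([1, 0, 0, 0, 1, 1])
    tn, fp, fn, tp = confusion_matrix(y_t, y_p).ravel()
    num = tp * tn - fp * fn
    den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    # Both values should agree (about 0.333 for this toy example).
    print(matthews_corrcoef(y_t, y_p), num / den)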
|
bsd-3-clause
|
Jimmy-Morzaria/scikit-learn
|
sklearn/semi_supervised/label_propagation.py
|
24
|
15181
|
# coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
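# --- Note on the update rule above (illustrative; not part of the original
# module) --- A direct reading of the propagation loop in fit(): at each
# iteration the label distributions Y are updated as
#     Y  <-  clamp_weights * (graph_matrix @ Y) + y_static
# where clamp_weights is 1 for labelled rows and alpha for unlabeled rows,
# and (for alpha > 0) y_static equals (1 - alpha) * Y_0 on labelled rows and
# 0 on unlabeled rows, Y_0 being the initial one-hot label distributions.
# The loop stops once the summed absolute change drops to tol or below, or
# max_iter is exhausted.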
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : integer
Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
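# Hedged note (added comment, not in the original module): `normalizer` holds
# per-column kernel mass; in the dense 'rbf' branch each row i of the
# symmetric kernel matrix is divided by the mass of column i, while the sparse
# 'knn' branch rescales the stored edge weights instead. The resulting graph
# feeds directly into the Y <- G @ Y propagation step in fit().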
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
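# Hedged note (added comment, not in the original module): _build_graph above
# returns the negated normalized graph Laplacian with its diagonal zeroed, so
# its off-diagonal entries are W_ij / sqrt(d_i * d_j), the symmetrically
# normalized affinities used by Zhou et al. (2004).
#
# Hedged usage sketch (added, not part of the original module). It assumes a
# scikit-learn environment where `sklearn.datasets` is importable and simply
# exercises the two estimators defined above on the iris data; parameter
# values are illustrative, not recommendations.
if __name__ == "__main__":
    import numpy as np
    from sklearn import datasets

    iris = datasets.load_iris()
    rng = np.random.RandomState(42)

    # Hide roughly half of the labels: -1 marks a sample as unlabeled.
    labels = np.copy(iris.target)
    labels[rng.rand(len(labels)) < 0.5] = -1

    for model in (LabelPropagation(),
                  LabelSpreading(kernel='knn', n_neighbors=7, alpha=0.2)):
        model.fit(iris.data, labels)
        # transduction_ holds the labels inferred for every training sample.
        acc = np.mean(model.transduction_ == iris.target)
        print(model.__class__.__name__, "transductive accuracy: %.3f" % acc)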
|
bsd-3-clause
|